// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define CSW_CSWCR			0x0000
#define  CSW_CSWCR_DUALMCB_MASK		BIT(0)
#define  CSW_CSWCR_MCB0_ROUTING(x)	(((x) & 0x0C) >> 2)
#define  CSW_CSWCR_MCB1_ROUTING(x)	(((x) & 0x30) >> 4)
#define MCBADDRMR			0x0000
#define  MCBADDRMR_DUALMCU_MODE_MASK	BIT(2)

#define PCPPMU_INTSTATUS_REG	0x000
#define PCPPMU_INTMASK_REG	0x004
#define  PCPPMU_INTMASK		0x0000000F
#define  PCPPMU_INTENMASK	0xFFFFFFFF
#define  PCPPMU_INTCLRMASK	0xFFFFFFF0
#define  PCPPMU_INT_MCU		BIT(0)
#define  PCPPMU_INT_MCB		BIT(1)
#define  PCPPMU_INT_L3C		BIT(2)
#define  PCPPMU_INT_IOB		BIT(3)

#define  PCPPMU_V3_INTMASK	0x00FF33FF
#define  PCPPMU_V3_INTENMASK	0xFFFFFFFF
#define  PCPPMU_V3_INTCLRMASK	0xFF00CC00
#define  PCPPMU_V3_INT_MCU	0x000000FF
#define  PCPPMU_V3_INT_MCB	0x00000300
#define  PCPPMU_V3_INT_L3C	0x00FF0000
#define  PCPPMU_V3_INT_IOB	0x00003000

#define PMU_MAX_COUNTERS	4
#define PMU_CNT_MAX_PERIOD	0xFFFFFFFFULL
#define PMU_V3_CNT_MAX_PERIOD	0xFFFFFFFFFFFFFFFFULL
#define PMU_OVERFLOW_MASK	0xF
#define PMU_PMCR_E		BIT(0)
#define PMU_PMCR_P		BIT(1)

#define PMU_PMEVCNTR0		0x000
#define PMU_PMEVCNTR1		0x004
#define PMU_PMEVCNTR2		0x008
#define PMU_PMEVCNTR3		0x00C
#define PMU_PMEVTYPER0		0x400
#define PMU_PMEVTYPER1		0x404
#define PMU_PMEVTYPER2		0x408
#define PMU_PMEVTYPER3		0x40C
#define PMU_PMAMR0		0xA00
#define PMU_PMAMR1		0xA04
#define PMU_PMCNTENSET		0xC00
#define PMU_PMCNTENCLR		0xC20
#define PMU_PMINTENSET		0xC40
#define PMU_PMINTENCLR		0xC60
#define PMU_PMOVSR		0xC80
#define PMU_PMCR		0xE04

/* PMU registers for V3 */
#define PMU_PMOVSCLR		0xC80
#define PMU_PMOVSSET		0xCC0

#define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)      (ev->hw.idx)
#define GET_EVENTID(ev)   (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)   (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)  ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
struct hw_pmu_info {
	u32 type;
	u32 enable_mask;
	void __iomem *csr;
};

struct xgene_pmu_dev {
	struct hw_pmu_info *inf;
	struct xgene_pmu *parent;
	struct pmu pmu;
	u8 max_counters;
	DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
	u64 max_period;
	const struct attribute_group **attr_groups;
	struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu_ops {
	void (*mask_int)(struct xgene_pmu *pmu);
	void (*unmask_int)(struct xgene_pmu *pmu);
	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
	void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
	void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
};

struct xgene_pmu {
	struct device *dev;
	struct hlist_node node;
	int version;
	void __iomem *pcppmu_csr;
	u32 mcb_active_mask;
	u32 mc_active_mask;
	u32 l3c_active_mask;
	cpumask_t cpu;
	int irq;
	raw_spinlock_t lock;
	const struct xgene_pmu_ops *ops;
	struct list_head l3cpmus;
	struct list_head iobpmus;
	struct list_head mcbpmus;
	struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
	char *name;
	struct list_head next;
	struct xgene_pmu_dev *pmu_dev;
	struct hw_pmu_info inf;
};

struct xgene_pmu_data {
	int id;
	u32 data;
};

enum xgene_pmu_version {
	PCP_PMU_V1 = 1,
	PCP_PMU_V2,
	PCP_PMU_V3,
};

enum xgene_pmu_dev_type {
	PMU_TYPE_L3C = 0,
	PMU_TYPE_IOB,
	PMU_TYPE_IOB_SLOW,
	PMU_TYPE_MCB,
	PMU_TYPE_MC,
};
/*
 * sysfs format attributes
 */
static ssize_t xgene_pmu_format_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "%s\n", (char *) eattr->var);
}

#define XGENE_PMU_FORMAT_ATTR(_name, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{ .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
		  .var = (void *) _config, }		\
	})[0].attr.attr)
static struct attribute *l3c_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
	NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
	XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
	NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_format_attrs,
};

static struct attribute *l3c_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
	NULL,
};

static struct attribute *iob_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
	NULL,
};

static struct attribute *mcb_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
	NULL,
};

static struct attribute *mc_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_v3_format_attrs,
};

static const struct attribute_group iob_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_v3_format_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_slow_pmu_v3_format_attrs,
};

static const struct attribute_group mcb_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_v3_format_attrs,
};

static const struct attribute_group mc_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_v3_format_attrs,
};
/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{ .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
		  .var = (void *) _config, }		\
	})[0].attr.attr)
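
/*
 * Illustrative note (not from the original source): the "format" and
 * "events" attribute groups below are what let the perf tool resolve
 * symbolic event names through sysfs. Assuming an L3C instance
 * registered as "l3c0", usage would look like:
 *
 *   # cat /sys/bus/event_source/devices/l3c0/events/read-miss
 *   config=0x3
 *   # perf stat -e l3c0/read-miss/ -a sleep 1
 */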
static struct attribute *l3c_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
	XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
	XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
	XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
	XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
	NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
	XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
	XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
	XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
	XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
	XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
	XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
	XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
	XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
	XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
	XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
	NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
	XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
	XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
	XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
	NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
	XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
	XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
	XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
	XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
	XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
	XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
	XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
	XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
	XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
	XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
	XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
	NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
	.name = "events",
	.attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_events_attrs,
};
static struct attribute *l3c_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
	XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
	XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
	XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
	XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
	XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
	XGENE_PMU_EVENT_ATTR(read, 0x08),
	XGENE_PMU_EVENT_ATTR(write, 0x09),
	XGENE_PMU_EVENT_ATTR(request, 0x0a),
	XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
	XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
	XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
	XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
	XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
	XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
	XGENE_PMU_EVENT_ATTR(egression, 0x1b),
	XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
	XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
	XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
	XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
	XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
	XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
	XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
	XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
	XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
	XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
	XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
	XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
	NULL,
};

static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
	XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
	XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
	XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
	XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
	XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
	XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
	XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
	XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
	XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
	XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
	XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
	XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
	XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
	NULL,
};

static struct attribute *mcb_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
	XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
	XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
	XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
	XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
	XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
	XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
	XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
	XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
	XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
	XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
	XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
	XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
	XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
	XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
	XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
	XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
	XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
	XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
	NULL,
};

static struct attribute *mc_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
	XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
	XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
	XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
	XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
	XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
	XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
	XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
	XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
	XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
	XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
	XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
	XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
	XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
	XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
	XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
	XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
	XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
	XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
	XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
	XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
	XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
	XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
	XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
	XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
	XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
	XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
	XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
	XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
	XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
	XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
	XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
	XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_v3_events_attrs,
};

static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_fast_pmu_v3_events_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_slow_pmu_v3_events_attrs,
};

static const struct attribute_group mcb_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_v3_events_attrs,
};

static const struct attribute_group mc_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_v3_events_attrs,
};
/*
 * sysfs cpumask attributes
 */
static ssize_t xgene_pmu_cpumask_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
	.attrs = xgene_pmu_cpumask_attrs,
};
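
/*
 * Illustrative note (not from the original source): the perf tool reads
 * this "cpumask" attribute and opens uncore events only on the advertised
 * CPU, so a system-wide session programs each shared counter exactly once.
 * The mask itself is maintained by the hotplug callbacks further below.
 */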
/*
 * Per PMU device attribute groups of PMU v1 and v2
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
	&l3c_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
	&iob_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
	&mcb_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
	&mc_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_events_attr_group,
	NULL
};

/*
 * Per PMU device attribute groups of PMU v3
 */
static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
	&l3c_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
	&iob_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_fast_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
	&iob_slow_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_slow_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
	&mcb_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
	&mc_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_v3_events_attr_group,
	NULL
};
static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
	int cntr;

	cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
				   pmu_dev->max_counters);
	if (cntr == pmu_dev->max_counters)
		return -ENOSPC;
	set_bit(cntr, pmu_dev->cntr_assign_mask);

	return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
	clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTCLRMASK,
	       xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	u32 lo, hi;

	/*
	 * v3 has 64-bit counter registers composed of two 32-bit registers.
	 * This can be a problem if the counter increments and carries
	 * out of bit [31] between the two reads. Re-reading the high word
	 * until it is stable prevents this issue.
	 */
	do {
		hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
		lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
	} while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));

	return (((u64)hi << 32) | lo);
}

static inline void
xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	u32 cnt_lo, cnt_hi;

	cnt_hi = upper_32_bits(val);
	cnt_lo = lower_32_bits(val);

	/* v3 has 64-bit counter registers composed of two 32-bit registers */
	xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
	xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
}

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_P;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val &= ~PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}
static void xgene_perf_pmu_enable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
				    pmu_dev->max_counters);

	if (!enabled)
		return;

	xgene_pmu->ops->start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct perf_event *sibling;

	/* Test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * SoC PMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;
	/*
	 * Many perf core operations (eg. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&pmu_dev->parent->cpu);

	hw->config = event->attr.config;
	/*
	 * Each bit of the config1 field represents an agent from which the
	 * request of the event comes. The event is counted only if it's
	 * caused by a request of an agent that has the bit cleared.
	 * By default, the event is counted for all agents.
	 */
	hw->config_base = event->attr.config1;

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}
static void xgene_perf_enable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
				      GET_EVENTID(event));
	xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
	if (pmu_dev->inf->type == PMU_TYPE_IOB)
		xgene_pmu->ops->write_agent1msk(pmu_dev,
						~((u32)GET_AGENT1ID(event)));

	xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	/*
	 * The 32-bit counter has a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully, we can handle the interrupt before another
	 * 2^31 events occur and the counter overtakes its previous value.
	 * The 64-bit counter is not expected to overflow.
	 */
	u64 val = 1ULL << 31;

	local64_set(&hw->prev_count, val);
	xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
}
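
/*
 * Worked example (illustrative, not from the original source): a 32-bit
 * counter preloaded with 2^31 (0x80000000) overflows only after a further
 * 2^31 events, so the overflow interrupt has up to 2^31 events' worth of
 * time to run before any counts would be lost.
 */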
static void xgene_perf_event_update(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hw->prev_count);
	new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));

	if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

	local64_add(delta, &event->count);
}
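
/*
 * Illustrative note (not from the original source): masking the difference
 * with max_period handles counter wrap-around. E.g. a 32-bit counter moving
 * from 0xFFFFFFF0 to 0x00000010 yields
 * (0x10 - 0xFFFFFFF0) & 0xFFFFFFFF = 0x20 events. The cmpxchg retry loop
 * keeps prev_count consistent if the overflow interrupt updates the event
 * concurrently.
 */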
static void xgene_perf_read(struct perf_event *event)
{
	xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
	hw->state = 0;

	xgene_perf_event_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hw->prev_count);

		xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
					      prev_raw_count);
	}

	xgene_perf_enable_event(event);
	perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_disable_event(event);
	WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
	hw->state |= PERF_HES_STOPPED;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_read(event);
	hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Allocate an event counter */
	hw->idx = get_next_avail_cntr(pmu_dev);
	if (hw->idx < 0)
		return -EAGAIN;

	/* Update counter event pointer for Interrupt handler */
	pmu_dev->pmu_counter_event[hw->idx] = event;

	if (flags & PERF_EF_START)
		xgene_perf_start(event, PERF_EF_RELOAD);

	return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	xgene_perf_stop(event, PERF_EF_UPDATE);

	/* clear the assigned counter */
	clear_avail_cntr(pmu_dev, GET_CNTR(event));

	perf_event_update_userpage(event);
	pmu_dev->pmu_counter_event[hw->idx] = NULL;
}
static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
	struct xgene_pmu *xgene_pmu;

	if (pmu_dev->parent->version == PCP_PMU_V3)
		pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
	else
		pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
	/* The first PMU version supports only a single event counter */
	xgene_pmu = pmu_dev->parent;
	if (xgene_pmu->version == PCP_PMU_V1)
		pmu_dev->max_counters = 1;
	else
		pmu_dev->max_counters = PMU_MAX_COUNTERS;

	/* Perf driver registration */
	pmu_dev->pmu = (struct pmu) {
		.attr_groups	= pmu_dev->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= xgene_perf_pmu_enable,
		.pmu_disable	= xgene_perf_pmu_disable,
		.event_init	= xgene_perf_event_init,
		.add		= xgene_perf_add,
		.del		= xgene_perf_del,
		.start		= xgene_perf_start,
		.stop		= xgene_perf_stop,
		.read		= xgene_perf_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	/* Hardware counter init */
	xgene_pmu->ops->stop_counters(pmu_dev);
	xgene_pmu->ops->reset_counters(pmu_dev);

	return perf_pmu_register(&pmu_dev->pmu, name, -1);
}
static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev *pmu;

	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;
	pmu->parent = xgene_pmu;
	pmu->inf = &ctx->inf;
	ctx->pmu_dev = pmu;

	switch (pmu->inf->type) {
	case PMU_TYPE_L3C:
		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = l3c_pmu_v3_attr_groups;
		else
			pmu->attr_groups = l3c_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
		else
			pmu->attr_groups = iob_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB_SLOW:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
		break;
	case PMU_TYPE_MCB:
		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mcb_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mcb_pmu_attr_groups;
		break;
	case PMU_TYPE_MC:
		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mc_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mc_pmu_attr_groups;
		break;
	default:
		return -EINVAL;
	}

	if (xgene_init_perf(pmu, ctx->name)) {
		dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
		return -ENODEV;
	}

	dev_info(dev, "%s PMU registered\n", ctx->name);

	return 0;
}
static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	void __iomem *csr = pmu_dev->inf->csr;
	u32 pmovsr;
	int idx;

	xgene_pmu->ops->stop_counters(pmu_dev);

	if (xgene_pmu->version == PCP_PMU_V3)
		pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
	else
		pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;

	if (!pmovsr)
		goto out;

	/* Clear interrupt flag */
	if (xgene_pmu->version == PCP_PMU_V1)
		writel(0x0, csr + PMU_PMOVSR);
	else if (xgene_pmu->version == PCP_PMU_V2)
		writel(pmovsr, csr + PMU_PMOVSR);
	else
		writel(pmovsr, csr + PMU_PMOVSCLR);

	for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
		struct perf_event *event = pmu_dev->pmu_counter_event[idx];
		int overflowed = pmovsr & BIT(idx);

		/* Ignore if we don't have an event. */
		if (!event || !overflowed)
			continue;
		xgene_perf_event_update(event);
		xgene_perf_event_set_period(event);
	}

out:
	xgene_pmu->ops->start_counters(pmu_dev);
}

static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
	u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
	struct xgene_pmu_dev_ctx *ctx;
	struct xgene_pmu *xgene_pmu = dev_id;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&xgene_pmu->lock, flags);

	/* Get Interrupt PMU source */
	val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
	if (xgene_pmu->version == PCP_PMU_V3) {
		intr_mcu = PCPPMU_V3_INT_MCU;
		intr_mcb = PCPPMU_V3_INT_MCB;
		intr_l3c = PCPPMU_V3_INT_L3C;
		intr_iob = PCPPMU_V3_INT_IOB;
	} else {
		intr_mcu = PCPPMU_INT_MCU;
		intr_mcb = PCPPMU_INT_MCB;
		intr_l3c = PCPPMU_INT_L3C;
		intr_iob = PCPPMU_INT_IOB;
	}
	if (val & intr_mcu) {
		list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_mcb) {
		list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_l3c) {
		list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_iob) {
		list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}

	raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);

	return IRQ_HANDLED;
}
static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					     struct platform_device *pdev)
{
	void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
	unsigned int reg;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	mcba_csr = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(mcba_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
		return PTR_ERR(mcba_csr);
	}

	mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(mcbb_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
		return PTR_ERR(mcbb_csr);
	}

	xgene_pmu->l3c_active_mask = 0x1;

	reg = readl(csw_csr + CSW_CSWCR);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		reg = readl(mcbb_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		reg = readl(mcba_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
						struct platform_device *pdev)
{
	void __iomem *csw_csr;
	unsigned int reg;
	u32 mcb0routing;
	u32 mcb1routing;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	reg = readl(csw_csr + CSW_CSWCR);
	mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
	mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active L3C(s), maximum is 8 */
		xgene_pmu->l3c_active_mask = 0xFF;
		/* Probe all active MC(s), maximum is 8 */
		if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
			xgene_pmu->mc_active_mask = 0xFF;
		else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
			xgene_pmu->mc_active_mask = 0x33;
		else
			xgene_pmu->mc_active_mask = 0x11;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active L3C(s), maximum is 4 */
		xgene_pmu->l3c_active_mask = 0x0F;
		/* Probe all active MC(s), maximum is 4 */
		if (mcb0routing == 0x2)
			xgene_pmu->mc_active_mask = 0x0F;
		else if (mcb0routing == 0x1)
			xgene_pmu->mc_active_mask = 0x03;
		else
			xgene_pmu->mc_active_mask = 0x01;
	}

	return 0;
}
static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					    struct platform_device *pdev)
{
	struct regmap *csw_map, *mcba_map, *mcbb_map;
	struct device_node *np = pdev->dev.of_node;
	unsigned int reg;

	csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
	if (IS_ERR(csw_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
		return PTR_ERR(csw_map);
	}

	mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
	if (IS_ERR(mcba_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
		return PTR_ERR(mcba_map);
	}

	mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
	if (IS_ERR(mcbb_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
		return PTR_ERR(mcbb_map);
	}

	xgene_pmu->l3c_active_mask = 0x1;
	if (regmap_read(csw_map, CSW_CSWCR, &reg))
		return -EINVAL;

	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		if (regmap_read(mcbb_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		if (regmap_read(mcba_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					      struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev)) {
		if (xgene_pmu->version == PCP_PMU_V3)
			return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
								    pdev);
		else
			return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
								 pdev);
	}
	return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
}
static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
	switch (type) {
	case PMU_TYPE_L3C:
		return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
	case PMU_TYPE_IOB:
		return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
	case PMU_TYPE_IOB_SLOW:
		return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
	case PMU_TYPE_MCB:
		return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
	case PMU_TYPE_MC:
		return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
	default:
		return devm_kasprintf(dev, GFP_KERNEL, "unknown");
	}
}
#if defined(CONFIG_ACPI)
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
					struct acpi_device *adev, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct list_head resource_list;
	struct xgene_pmu_dev_ctx *ctx;
	const union acpi_object *obj;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	struct resource_entry *rentry;
	int enable_bit;
	int rc;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&resource_list);
	rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (rc <= 0) {
		dev_err(dev, "PMU type %d: No resources found\n", type);
		return NULL;
	}

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			rentry = NULL;
			break;
		}
	}
	acpi_dev_free_resource_list(&resource_list);

	if (rentry) {
		dev_err(dev, "PMU type %d: No memory resource found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	rc = acpi_dev_get_property(adev, "enable-bit-index",
				   ACPI_TYPE_INTEGER, &obj);
	if (rc < 0)
		enable_bit = 0;
	else
		enable_bit = (int) obj->integer.value;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}
	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
	{"APMC0D5D", PMU_TYPE_L3C},
	{"APMC0D5E", PMU_TYPE_IOB},
	{"APMC0D5F", PMU_TYPE_MCB},
	{"APMC0D60", PMU_TYPE_MC},
	{"APMC0D84", PMU_TYPE_L3C},
	{"APMC0D85", PMU_TYPE_IOB},
	{"APMC0D86", PMU_TYPE_IOB_SLOW},
	{"APMC0D87", PMU_TYPE_MCB},
	{"APMC0D88", PMU_TYPE_MC},
	{},
};

static const struct acpi_device_id *xgene_pmu_acpi_match_type(
					const struct acpi_device_id *ids,
					struct acpi_device *adev)
{
	const struct acpi_device_id *match_id = NULL;
	const struct acpi_device_id *id;

	for (id = ids; id->id[0] || id->cls; id++) {
		if (!acpi_match_device_ids(adev, id))
			match_id = id;
		else if (match_id)
			break;
	}

	return match_id;
}

static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
				    void *data, void **return_value)
{
	const struct acpi_device_id *acpi_id;
	struct xgene_pmu *xgene_pmu = data;
	struct xgene_pmu_dev_ctx *ctx;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
	if (!acpi_id)
		return AE_OK;

	ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
	if (!ctx)
		return AE_OK;

	if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
		/* Can't add the PMU device, skip it */
		devm_kfree(xgene_pmu->dev, ctx);
		return AE_OK;
	}

	switch (ctx->inf.type) {
	case PMU_TYPE_L3C:
		list_add(&ctx->next, &xgene_pmu->l3cpmus);
		break;
	case PMU_TYPE_IOB:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_IOB_SLOW:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_MCB:
		list_add(&ctx->next, &xgene_pmu->mcbpmus);
		break;
	case PMU_TYPE_MC:
		list_add(&ctx->next, &xgene_pmu->mcpmus);
		break;
	}
	return AE_OK;
}

static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	struct device *dev = xgene_pmu->dev;
	acpi_handle handle;
	acpi_status status;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -EINVAL;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to probe PMU devices\n");
		return -ENODEV;
	}

	return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	return 0;
}
#endif
static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
					struct device_node *np, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev_ctx *ctx;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	int enable_bit;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (of_address_to_resource(np, 0, &res) < 0) {
		dev_err(dev, "PMU type %d: No resource address found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
		enable_bit = 0;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}

	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				 struct platform_device *pdev)
{
	struct xgene_pmu_dev_ctx *ctx;
	struct device_node *np;

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_available(np))
			continue;

		if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
		else
			ctx = NULL;

		if (!ctx)
			continue;

		if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
			/* Can't add the PMU device, skip it */
			devm_kfree(xgene_pmu->dev, ctx);
			continue;
		}

		switch (ctx->inf.type) {
		case PMU_TYPE_L3C:
			list_add(&ctx->next, &xgene_pmu->l3cpmus);
			break;
		case PMU_TYPE_IOB:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_IOB_SLOW:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_MCB:
			list_add(&ctx->next, &xgene_pmu->mcbpmus);
			break;
		case PMU_TYPE_MC:
			list_add(&ctx->next, &xgene_pmu->mcpmus);
			break;
		}
	}

	return 0;
}

static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				   struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev))
		return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
	return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}
static const struct xgene_pmu_data xgene_pmu_data = {
	.id   = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
	.id   = PCP_PMU_V2,
};

static const struct xgene_pmu_ops xgene_pmu_ops = {
	.mask_int = xgene_pmu_mask_int,
	.unmask_int = xgene_pmu_unmask_int,
	.read_counter = xgene_pmu_read_counter32,
	.write_counter = xgene_pmu_write_counter32,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_write_agentmsk,
	.write_agent1msk = xgene_pmu_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
	.mask_int = xgene_pmu_v3_mask_int,
	.unmask_int = xgene_pmu_v3_unmask_int,
	.read_counter = xgene_pmu_read_counter64,
	.write_counter = xgene_pmu_write_counter64,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_v3_write_agentmsk,
	.write_agent1msk = xgene_pmu_v3_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct of_device_id xgene_pmu_of_match[] = {
	{ .compatible = "apm,xgene-pmu",    .data = &xgene_pmu_data },
	{ .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
	{"APMC0D5B", PCP_PMU_V1},
	{"APMC0D5C", PCP_PMU_V2},
	{"APMC0D83", PCP_PMU_V3},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif
static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);

	if (cpumask_empty(&xgene_pmu->cpu))
		cpumask_set_cpu(cpu, &xgene_pmu->cpu);

	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);
	struct xgene_pmu_dev_ctx *ctx;
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}

	cpumask_set_cpu(target, &xgene_pmu->cpu);
	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}
static int xgene_pmu_probe(struct platform_device *pdev)
{
	const struct xgene_pmu_data *dev_data;
	const struct of_device_id *of_id;
	struct xgene_pmu *xgene_pmu;
	struct resource *res;
	int irq, rc;
	int version;

	/* Install a hook to update the reader CPU in case it goes offline */
	rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				     "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
				     xgene_pmu_online_cpu,
				     xgene_pmu_offline_cpu);
	if (rc)
		return rc;

	xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
	if (!xgene_pmu)
		return -ENOMEM;
	xgene_pmu->dev = &pdev->dev;
	platform_set_drvdata(pdev, xgene_pmu);

	version = -EINVAL;
	of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
	if (of_id) {
		dev_data = (const struct xgene_pmu_data *) of_id->data;
		version = dev_data->id;
	}

#ifdef CONFIG_ACPI
	if (ACPI_COMPANION(&pdev->dev)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
		if (acpi_id)
			version = (int) acpi_id->driver_data;
	}
#endif
	if (version < 0)
		return -ENODEV;

	if (version == PCP_PMU_V3)
		xgene_pmu->ops = &xgene_pmu_v3_ops;
	else
		xgene_pmu->ops = &xgene_pmu_ops;

	INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
	INIT_LIST_HEAD(&xgene_pmu->iobpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcpmus);

	xgene_pmu->version = version;
	dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_pmu->pcppmu_csr)) {
		dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
		return PTR_ERR(xgene_pmu->pcppmu_csr);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
			      IRQF_NOBALANCING | IRQF_NO_THREAD,
			      dev_name(&pdev->dev), xgene_pmu);
	if (rc) {
		dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
		return rc;
	}

	xgene_pmu->irq = irq;

	raw_spin_lock_init(&xgene_pmu->lock);

	/* Check for active MCBs and MCUs */
	rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
		xgene_pmu->mcb_active_mask = 0x1;
		xgene_pmu->mc_active_mask = 0x1;
	}

	/* Add this instance to the list used by the hotplug callback */
	rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				      &xgene_pmu->node);
	if (rc) {
		dev_err(&pdev->dev, "Error %d registering hotplug", rc);
		return rc;
	}

	/* Walk through the tree for all PMU perf devices */
	rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
	if (rc) {
		dev_err(&pdev->dev, "No PMU perf devices found!\n");
		goto out_unregister;
	}

	/* Enable interrupt */
	xgene_pmu->ops->unmask_int(xgene_pmu);

	return 0;

out_unregister:
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);
	return rc;
}
static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
	struct xgene_pmu_dev_ctx *ctx;

	list_for_each_entry(ctx, pmus, next) {
		perf_pmu_unregister(&ctx->pmu_dev->pmu);
	}
}

static int xgene_pmu_remove(struct platform_device *pdev)
{
	struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);

	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);

	return 0;
}

static struct platform_driver xgene_pmu_driver = {
	.probe = xgene_pmu_probe,
	.remove = xgene_pmu_remove,
	.driver = {
		.name		= "xgene-pmu",
		.of_match_table = xgene_pmu_of_match,
		.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(xgene_pmu_driver);