// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

#define CSKY_PMU_MAX_EVENTS 32
#define DEFAULT_COUNT_WIDTH 48

#define HPCR		"<0, 0x0>"	/* PMU Control reg */
#define HPSPR		"<0, 0x1>"	/* Start PC reg */
#define HPEPR		"<0, 0x2>"	/* End PC reg */
#define HPSIR		"<0, 0x3>"	/* Soft Counter reg */
#define HPCNTENR	"<0, 0x4>"	/* Count Enable reg */
#define HPINTENR	"<0, 0x5>"	/* Interrupt Enable reg */
#define HPOFSR		"<0, 0x6>"	/* Interrupt Status reg */

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *events[CSKY_PMU_MAX_EVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)];
};

static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void);
static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val);

static struct csky_pmu_t {
	struct pmu			pmu;
	struct pmu_hw_events __percpu	*hw_events;
	struct platform_device		*plat_device;
	uint32_t			count_width;
	uint32_t			hpcr;
	u64				max_period;
} csky_pmu;
static int csky_pmu_irq;

#define to_csky_pmu(p)	(container_of(p, struct csky_pmu_t, pmu))

54 asm volatile("cprgr %0, "reg"\n" \
61 #define cpwgr(reg, val) \
73 asm volatile("cprcr %0, "reg"\n" \
80 #define cpwcr(reg, val) \
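/*
 * Each hardware counter is 64 bits wide and is exposed as a pair of
 * 32-bit coprocessor registers (low word at an even index, high word at
 * the following odd index).  The read helpers below re-read the high
 * word until it is stable, so a carry out of the low word between the
 * two 32-bit reads cannot produce a torn 64-bit value.
 */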
/* cycle counter */
static uint64_t csky_pmu_read_cc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x3>");
		lo  = cprgr("<0, 0x2>");
		hi  = cprgr("<0, 0x3>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_cc(uint64_t val)
{
	cpwgr("<0, 0x2>", (uint32_t) val);
	cpwgr("<0, 0x3>", (uint32_t) (val >> 32));
}

/* instruction counter */
static uint64_t csky_pmu_read_ic(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x5>");
		lo  = cprgr("<0, 0x4>");
		hi  = cprgr("<0, 0x5>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_ic(uint64_t val)
{
	cpwgr("<0, 0x4>", (uint32_t) val);
	cpwgr("<0, 0x5>", (uint32_t) (val >> 32));
}

/* l1 icache access counter */
static uint64_t csky_pmu_read_icac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x7>");
		lo  = cprgr("<0, 0x6>");
		hi  = cprgr("<0, 0x7>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_icac(uint64_t val)
{
	cpwgr("<0, 0x6>", (uint32_t) val);
	cpwgr("<0, 0x7>", (uint32_t) (val >> 32));
}

/* l1 icache miss counter */
static uint64_t csky_pmu_read_icmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x9>");
		lo  = cprgr("<0, 0x8>");
		hi  = cprgr("<0, 0x9>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_icmc(uint64_t val)
{
	cpwgr("<0, 0x8>", (uint32_t) val);
	cpwgr("<0, 0x9>", (uint32_t) (val >> 32));
}

/* l1 dcache access counter */
static uint64_t csky_pmu_read_dcac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0xb>");
		lo  = cprgr("<0, 0xa>");
		hi  = cprgr("<0, 0xb>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcac(uint64_t val)
{
	cpwgr("<0, 0xa>", (uint32_t) val);
	cpwgr("<0, 0xb>", (uint32_t) (val >> 32));
}

/* l1 dcache miss counter */
static uint64_t csky_pmu_read_dcmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0xd>");
		lo  = cprgr("<0, 0xc>");
		hi  = cprgr("<0, 0xd>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcmc(uint64_t val)
{
	cpwgr("<0, 0xc>", (uint32_t) val);
	cpwgr("<0, 0xd>", (uint32_t) (val >> 32));
}

/* l2 cache access counter */
static uint64_t csky_pmu_read_l2ac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0xf>");
		lo  = cprgr("<0, 0xe>");
		hi  = cprgr("<0, 0xf>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2ac(uint64_t val)
{
	cpwgr("<0, 0xe>", (uint32_t) val);
	cpwgr("<0, 0xf>", (uint32_t) (val >> 32));
}

/* l2 cache miss counter */
static uint64_t csky_pmu_read_l2mc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x11>");
		lo  = cprgr("<0, 0x10>");
		hi  = cprgr("<0, 0x11>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2mc(uint64_t val)
{
	cpwgr("<0, 0x10>", (uint32_t) val);
	cpwgr("<0, 0x11>", (uint32_t) (val >> 32));
}

/* I-UTLB miss counter */
static uint64_t csky_pmu_read_iutlbmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x15>");
		lo  = cprgr("<0, 0x14>");
		hi  = cprgr("<0, 0x15>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_iutlbmc(uint64_t val)
{
	cpwgr("<0, 0x14>", (uint32_t) val);
	cpwgr("<0, 0x15>", (uint32_t) (val >> 32));
}

/* D-UTLB miss counter */
static uint64_t csky_pmu_read_dutlbmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x17>");
		lo  = cprgr("<0, 0x16>");
		hi  = cprgr("<0, 0x17>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dutlbmc(uint64_t val)
{
	cpwgr("<0, 0x16>", (uint32_t) val);
	cpwgr("<0, 0x17>", (uint32_t) (val >> 32));
}

/* JTLB miss counter */
static uint64_t csky_pmu_read_jtlbmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x19>");
		lo  = cprgr("<0, 0x18>");
		hi  = cprgr("<0, 0x19>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_jtlbmc(uint64_t val)
{
	cpwgr("<0, 0x18>", (uint32_t) val);
	cpwgr("<0, 0x19>", (uint32_t) (val >> 32));
}

/* software counter */
static uint64_t csky_pmu_read_softc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x1b>");
		lo  = cprgr("<0, 0x1a>");
		hi  = cprgr("<0, 0x1b>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_softc(uint64_t val)
{
	cpwgr("<0, 0x1a>", (uint32_t) val);
	cpwgr("<0, 0x1b>", (uint32_t) (val >> 32));
}

/* conditional branch mispredict counter */
static uint64_t csky_pmu_read_cbmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x1d>");
		lo  = cprgr("<0, 0x1c>");
		hi  = cprgr("<0, 0x1d>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_cbmc(uint64_t val)
{
	cpwgr("<0, 0x1c>", (uint32_t) val);
	cpwgr("<0, 0x1d>", (uint32_t) (val >> 32));
}

/* conditional branch instruction counter */
static uint64_t csky_pmu_read_cbic(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x1f>");
		lo  = cprgr("<0, 0x1e>");
		hi  = cprgr("<0, 0x1f>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_cbic(uint64_t val)
{
	cpwgr("<0, 0x1e>", (uint32_t) val);
	cpwgr("<0, 0x1f>", (uint32_t) (val >> 32));
}

/* indirect branch mispredict counter */
static uint64_t csky_pmu_read_ibmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x21>");
		lo  = cprgr("<0, 0x20>");
		hi  = cprgr("<0, 0x21>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_ibmc(uint64_t val)
{
	cpwgr("<0, 0x20>", (uint32_t) val);
	cpwgr("<0, 0x21>", (uint32_t) (val >> 32));
}

/* indirect branch instruction counter */
static uint64_t csky_pmu_read_ibic(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x23>");
		lo  = cprgr("<0, 0x22>");
		hi  = cprgr("<0, 0x23>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_ibic(uint64_t val)
{
	cpwgr("<0, 0x22>", (uint32_t) val);
	cpwgr("<0, 0x23>", (uint32_t) (val >> 32));
}

/* LSU spec fail counter */
static uint64_t csky_pmu_read_lsfc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x25>");
		lo  = cprgr("<0, 0x24>");
		hi  = cprgr("<0, 0x25>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_lsfc(uint64_t val)
{
	cpwgr("<0, 0x24>", (uint32_t) val);
	cpwgr("<0, 0x25>", (uint32_t) (val >> 32));
}

/* store instruction counter */
static uint64_t csky_pmu_read_sic(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x27>");
		lo  = cprgr("<0, 0x26>");
		hi  = cprgr("<0, 0x27>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_sic(uint64_t val)
{
	cpwgr("<0, 0x26>", (uint32_t) val);
	cpwgr("<0, 0x27>", (uint32_t) (val >> 32));
}

/* dcache read access counter */
static uint64_t csky_pmu_read_dcrac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x29>");
		lo  = cprgr("<0, 0x28>");
		hi  = cprgr("<0, 0x29>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcrac(uint64_t val)
{
	cpwgr("<0, 0x28>", (uint32_t) val);
	cpwgr("<0, 0x29>", (uint32_t) (val >> 32));
}

/* dcache read miss counter */
static uint64_t csky_pmu_read_dcrmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x2b>");
		lo  = cprgr("<0, 0x2a>");
		hi  = cprgr("<0, 0x2b>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcrmc(uint64_t val)
{
	cpwgr("<0, 0x2a>", (uint32_t) val);
	cpwgr("<0, 0x2b>", (uint32_t) (val >> 32));
}

/* dcache write access counter */
static uint64_t csky_pmu_read_dcwac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x2d>");
		lo  = cprgr("<0, 0x2c>");
		hi  = cprgr("<0, 0x2d>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcwac(uint64_t val)
{
	cpwgr("<0, 0x2c>", (uint32_t) val);
	cpwgr("<0, 0x2d>", (uint32_t) (val >> 32));
}

/* dcache write miss counter */
static uint64_t csky_pmu_read_dcwmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x2f>");
		lo  = cprgr("<0, 0x2e>");
		hi  = cprgr("<0, 0x2f>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_dcwmc(uint64_t val)
{
	cpwgr("<0, 0x2e>", (uint32_t) val);
	cpwgr("<0, 0x2f>", (uint32_t) (val >> 32));
}

/* l2cache read access counter */
static uint64_t csky_pmu_read_l2rac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x31>");
		lo  = cprgr("<0, 0x30>");
		hi  = cprgr("<0, 0x31>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2rac(uint64_t val)
{
	cpwgr("<0, 0x30>", (uint32_t) val);
	cpwgr("<0, 0x31>", (uint32_t) (val >> 32));
}

/* l2cache read miss counter */
static uint64_t csky_pmu_read_l2rmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x33>");
		lo  = cprgr("<0, 0x32>");
		hi  = cprgr("<0, 0x33>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2rmc(uint64_t val)
{
	cpwgr("<0, 0x32>", (uint32_t) val);
	cpwgr("<0, 0x33>", (uint32_t) (val >> 32));
}

/* l2cache write access counter */
static uint64_t csky_pmu_read_l2wac(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x35>");
		lo  = cprgr("<0, 0x34>");
		hi  = cprgr("<0, 0x35>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2wac(uint64_t val)
{
	cpwgr("<0, 0x34>", (uint32_t) val);
	cpwgr("<0, 0x35>", (uint32_t) (val >> 32));
}

/* l2cache write miss counter */
static uint64_t csky_pmu_read_l2wmc(void)
{
	uint32_t lo, hi, tmp;
	uint64_t result;

	do {
		tmp = cprgr("<0, 0x37>");
		lo  = cprgr("<0, 0x36>");
		hi  = cprgr("<0, 0x37>");
	} while (hi != tmp);

	result = (uint64_t) (hi) << 32;
	result |= lo;

	return result;
}

static void csky_pmu_write_l2wmc(uint64_t val)
{
	cpwgr("<0, 0x36>", (uint32_t) val);
	cpwgr("<0, 0x37>", (uint32_t) (val >> 32));
}

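/*
 * The indices 0x1-0x1b used as hwc->idx select one of the raw counters
 * above through hw_raw_read_mapping/hw_raw_write_mapping (populated in
 * init_hw_perf_events()).  The tables below translate generic perf
 * hardware and cache events into those raw counter indices.
 */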
#define HW_OP_UNSUPPORTED	0xffff
static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x1,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x2,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0xf,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0xe,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= HW_OP_UNSUPPORTED,
};

#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED	0xffff
static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[C(L1D)] = {
#ifdef CONFIG_CPU_CK810
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x5,
			[C(RESULT_MISS)]	= 0x6,
		},
#else
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x14,
			[C(RESULT_MISS)]	= 0x15,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x16,
			[C(RESULT_MISS)]	= 0x17,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
#endif
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x3,
			[C(RESULT_MISS)]	= 0x4,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
#ifdef CONFIG_CPU_CK810
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= 0x7,
			[C(RESULT_MISS)]	= 0x8,
		},
#else
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x18,
			[C(RESULT_MISS)]	= 0x19,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x1a,
			[C(RESULT_MISS)]	= 0x1b,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
#endif
	},
	[C(DTLB)] = {
#ifdef CONFIG_CPU_CK810
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
#else
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x14,
			[C(RESULT_MISS)]	= 0xb,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= 0x16,
			[C(RESULT_MISS)]	= 0xb,
		},
#endif
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
#ifdef CONFIG_CPU_CK810
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
#else
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= 0x3,
			[C(RESULT_MISS)]	= 0xa,
		},
#endif
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

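/*
 * Re-arm a counter: it is seeded with -left (truncated to the counter
 * width) so that it overflows, and raises the overflow interrupt if
 * enabled, after "left" more events.
 */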
int csky_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)csky_pmu.max_period)
		left = csky_pmu.max_period;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future "deltas":
	 */
	local64_set(&hwc->prev_count, (u64)(-left));

	if (hw_raw_write_mapping[hwc->idx] != NULL)
		hw_raw_write_mapping[hwc->idx]((u64)(-left) &
					       csky_pmu.max_period);

	cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR));

	perf_event_update_userpage(event);

	return ret;
}

static void csky_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc)
{
	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
	/*
	 * Sign extend count value to 64bit, otherwise delta calculation
	 * would be incorrect when overflow occurs.
	 */
	uint64_t new_raw_count = sign_extend64(
		hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1);
	int64_t delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for us to re-enter this function anytime.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void csky_pmu_reset(void *info)
{
	cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1));
}

static void csky_pmu_read(struct perf_event *event)
{
	csky_perf_event_update(event, &event->hw);
}

static int csky_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;

	cache_type	= (config >>  0) & 0xff;
	cache_op	= (config >>  8) & 0xff;
	cache_result	= (config >> 16) & 0xff;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	return csky_pmu_cache_map[cache_type][cache_op][cache_result];
}

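/*
 * event_init maps the requested event onto a raw counter index: from the
 * hardware/cache maps above, or directly from attr.config for
 * PERF_TYPE_RAW.  The privilege filter is kept in csky_pmu.hpcr: only
 * BIT(2) is set when user counting is excluded, only BIT(3) when kernel
 * counting is excluded, and both bits otherwise.
 */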
static int csky_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		ret = csky_pmu_hw_map[event->attr.config];
		if (ret == HW_OP_UNSUPPORTED)
			return -ENOENT;
		hwc->idx = ret;
		break;
	case PERF_TYPE_HW_CACHE:
		ret = csky_pmu_cache_event(event->attr.config);
		if (ret == CACHE_OP_UNSUPPORTED)
			return -ENOENT;
		hwc->idx = ret;
		break;
	case PERF_TYPE_RAW:
		if (hw_raw_read_mapping[event->attr.config] == NULL)
			return -ENOENT;
		hwc->idx = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	if (event->attr.exclude_user)
		csky_pmu.hpcr = BIT(2);
	else if (event->attr.exclude_kernel)
		csky_pmu.hpcr = BIT(3);
	else
		csky_pmu.hpcr = BIT(2) | BIT(3);

	csky_pmu.hpcr |= BIT(1) | BIT(0);

	return 0;
}

/* starts all counters */
static void csky_pmu_enable(struct pmu *pmu)
{
	cpwcr(HPCR, csky_pmu.hpcr);
}

/* stops all counters */
static void csky_pmu_disable(struct pmu *pmu)
{
	cpwcr(HPCR, BIT(1));
}

static void csky_pmu_start(struct perf_event *event, int flags)
{
	unsigned long flg;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	csky_pmu_event_set_period(event);

	local_irq_save(flg);

	cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR));
	cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR));

	local_irq_restore(flg);
}

static void csky_pmu_stop_event(struct perf_event *event)
{
	unsigned long flg;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	local_irq_save(flg);

	cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR));
	cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR));

	local_irq_restore(flg);
}

static void csky_pmu_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		csky_pmu_stop_event(event);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		csky_perf_event_update(event, &event->hw);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void csky_pmu_del(struct perf_event *event, int flags)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
	struct hw_perf_event *hwc = &event->hw;

	csky_pmu_stop(event, PERF_EF_UPDATE);

	hw_events->events[hwc->idx] = NULL;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int csky_pmu_add(struct perf_event *event, int flags)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events);
	struct hw_perf_event *hwc = &event->hw;

	hw_events->events[hwc->idx] = event;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		csky_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev)
{
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Did an overflow occur?
	 */
	if (!cprcr(HPOFSR))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	csky_pmu_disable(&csky_pmu.pmu);

	for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;
		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!(cprcr(HPOFSR) & BIT(idx)))
			continue;

		hwc = &event->hw;
		csky_perf_event_update(event, &event->hw);
		perf_sample_data_init(&data, 0, hwc->last_period);
		csky_pmu_event_set_period(event);

		if (perf_event_overflow(event, &data, regs))
			csky_pmu_stop_event(event);
	}

	csky_pmu_enable(&csky_pmu.pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

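/*
 * The PMU uses a single per-cpu interrupt: it is requested once here and
 * then enabled/disabled on each CPU by the hotplug callbacks further down
 * (csky_pmu_starting_cpu()/csky_pmu_dying_cpu()).
 */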
static int csky_pmu_request_irq(irq_handler_t handler)
{
	int err, irqs;
	struct platform_device *pmu_device = csky_pmu.plat_device;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	csky_pmu_irq = platform_get_irq(pmu_device, 0);
	if (csky_pmu_irq < 0)
		return -ENODEV;
	err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu",
				 this_cpu_ptr(csky_pmu.hw_events));
	if (err) {
		pr_err("unable to request IRQ%d for CSKY PMU counters\n",
		       csky_pmu_irq);
		return err;
	}

	return 0;
}

static void csky_pmu_free_irq(void)
{
	int irq;
	struct platform_device *pmu_device = csky_pmu.plat_device;

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0)
		free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events));
}

int init_hw_perf_events(void)
{
	csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events,
					      GFP_KERNEL);
	if (!csky_pmu.hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		return -ENOMEM;
	}

	csky_pmu.pmu = (struct pmu) {
		.pmu_enable	= csky_pmu_enable,
		.pmu_disable	= csky_pmu_disable,
		.event_init	= csky_pmu_event_init,
		.add		= csky_pmu_add,
		.del		= csky_pmu_del,
		.start		= csky_pmu_start,
		.stop		= csky_pmu_stop,
		.read		= csky_pmu_read,
	};

	/* Clear the whole mapping tables before filling them in. */
	memset((void *)hw_raw_read_mapping, 0,
	       sizeof(hw_raw_read_mapping));

	hw_raw_read_mapping[0x1]  = csky_pmu_read_cc;
	hw_raw_read_mapping[0x2]  = csky_pmu_read_ic;
	hw_raw_read_mapping[0x3]  = csky_pmu_read_icac;
	hw_raw_read_mapping[0x4]  = csky_pmu_read_icmc;
	hw_raw_read_mapping[0x5]  = csky_pmu_read_dcac;
	hw_raw_read_mapping[0x6]  = csky_pmu_read_dcmc;
	hw_raw_read_mapping[0x7]  = csky_pmu_read_l2ac;
	hw_raw_read_mapping[0x8]  = csky_pmu_read_l2mc;
	hw_raw_read_mapping[0xa]  = csky_pmu_read_iutlbmc;
	hw_raw_read_mapping[0xb]  = csky_pmu_read_dutlbmc;
	hw_raw_read_mapping[0xc]  = csky_pmu_read_jtlbmc;
	hw_raw_read_mapping[0xd]  = csky_pmu_read_softc;
	hw_raw_read_mapping[0xe]  = csky_pmu_read_cbmc;
	hw_raw_read_mapping[0xf]  = csky_pmu_read_cbic;
	hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc;
	hw_raw_read_mapping[0x11] = csky_pmu_read_ibic;
	hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc;
	hw_raw_read_mapping[0x13] = csky_pmu_read_sic;
	hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac;
	hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc;
	hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac;
	hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc;
	hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac;
	hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc;
	hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac;
	hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc;

	memset((void *)hw_raw_write_mapping, 0,
	       sizeof(hw_raw_write_mapping));

	hw_raw_write_mapping[0x1]  = csky_pmu_write_cc;
	hw_raw_write_mapping[0x2]  = csky_pmu_write_ic;
	hw_raw_write_mapping[0x3]  = csky_pmu_write_icac;
	hw_raw_write_mapping[0x4]  = csky_pmu_write_icmc;
	hw_raw_write_mapping[0x5]  = csky_pmu_write_dcac;
	hw_raw_write_mapping[0x6]  = csky_pmu_write_dcmc;
	hw_raw_write_mapping[0x7]  = csky_pmu_write_l2ac;
	hw_raw_write_mapping[0x8]  = csky_pmu_write_l2mc;
	hw_raw_write_mapping[0xa]  = csky_pmu_write_iutlbmc;
	hw_raw_write_mapping[0xb]  = csky_pmu_write_dutlbmc;
	hw_raw_write_mapping[0xc]  = csky_pmu_write_jtlbmc;
	hw_raw_write_mapping[0xd]  = csky_pmu_write_softc;
	hw_raw_write_mapping[0xe]  = csky_pmu_write_cbmc;
	hw_raw_write_mapping[0xf]  = csky_pmu_write_cbic;
	hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc;
	hw_raw_write_mapping[0x11] = csky_pmu_write_ibic;
	hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc;
	hw_raw_write_mapping[0x13] = csky_pmu_write_sic;
	hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac;
	hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc;
	hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac;
	hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc;
	hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac;
	hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc;
	hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac;
	hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc;

	return 0;
}

static int csky_pmu_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(csky_pmu_irq, 0);
	return 0;
}

static int csky_pmu_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_pmu_irq);
	return 0;
}

int csky_pmu_device_probe(struct platform_device *pdev,
			  const struct of_device_id *of_table)
{
	struct device_node *node = pdev->dev.of_node;
	int ret;

	ret = init_hw_perf_events();
	if (ret) {
		pr_notice("[perf] failed to probe PMU!\n");
		return ret;
	}

	if (of_property_read_u32(node, "count-width",
				 &csky_pmu.count_width)) {
		csky_pmu.count_width = DEFAULT_COUNT_WIDTH;
	}
	csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1;

	csky_pmu.plat_device = pdev;

	/* Ensure the PMU has sane values out of reset. */
	on_each_cpu(csky_pmu_reset, &csky_pmu, 1);

	ret = csky_pmu_request_irq(csky_pmu_handle_irq);
	if (ret) {
		csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
		pr_notice("[perf] PMU request irq fail!\n");
	}

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE",
				csky_pmu_starting_cpu,
				csky_pmu_dying_cpu);
	if (ret) {
		csky_pmu_free_irq();
		free_percpu(csky_pmu.hw_events);
		return ret;
	}

	ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
	if (ret) {
		csky_pmu_free_irq();
		free_percpu(csky_pmu.hw_events);
	}

	return ret;
}

static const struct of_device_id csky_pmu_of_device_ids[] = {
	{.compatible = "csky,csky-pmu"},
	{},
};

static int csky_pmu_dev_probe(struct platform_device *pdev)
{
	return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids);
}

static struct platform_driver csky_pmu_driver = {
	.driver = {
		.name		= "csky-pmu",
		.of_match_table	= csky_pmu_of_device_ids,
	},
	.probe = csky_pmu_dev_probe,
};

static int __init csky_pmu_probe(void)
{
	int ret;

	ret = platform_driver_register(&csky_pmu_driver);
	if (ret)
		pr_notice("[perf] PMU initialization failed\n");
	else
		pr_notice("[perf] PMU initialization done\n");

	return ret;
}

device_initcall(csky_pmu_probe);