/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
static cpumask_t i915_pmu_cpumask;
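/*
 * Event configs encode the engine class, engine instance and the sample
 * type in a single u64, using the I915_PMU_* shifts and masks from the
 * uapi header. The helpers below pick those fields back out of
 * event->attr.config.
 */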
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}
static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}
static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}
static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}
static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}
static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}
static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}
static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}
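/*
 * Decide whether the sampling timer needs to run: only the frequency
 * counters and the MMIO-sampled engine counters require it, and even
 * those can be turned off while the GPU is idle or when software
 * busyness tracking is available.
 */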
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
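/*
 * i915_pmu_gt_parked() and i915_pmu_gt_unparked() are called from the GT
 * power management code as the device transitions between idle and busy,
 * and gate the sampling timer accordingly.
 */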
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}
static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}
static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	sample->cur += mul_u32_u32(val, unit);
}
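/*
 * Sample the engines on every timer tick: busyness is approximated by
 * checking whether the last submitted seqno has been retired, while the
 * WAIT and SEMA samples are read from the RING_CTL register, which
 * requires forcewake. Each positive sample adds one timer period to the
 * respective counter.
 */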
static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}
static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event)) {
		ret = engine_event_init(event);
	} else {
		ret = 0;
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
				/* Requires a mutex for sampling! */
				ret = -ENODEV;
			/* Fall-through. */
		case I915_PMU_REQUESTED_FREQUENCY:
			if (INTEL_GEN(i915) < 6)
				ret = -ENODEV;
			break;
		case I915_PMU_INTERRUPTS:
			break;
		case I915_PMU_RC6_RESIDENCY:
			if (!HAS_RC6(i915))
				ret = -ENODEV;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}
	if (ret)
		return ret;

	event->destroy = i915_pmu_event_destroy;

	return 0;
}
static u64 __get_rc6(struct drm_i915_private *i915)
{
	u64 val;

	val = intel_rc6_residency_ns(i915,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

	return val;
}
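/*
 * RC6 residency can only be read from hardware while the device is
 * runtime resumed. While suspended the counter is estimated from the
 * time spent suspended, on top of the last known real value, and care
 * is taken never to report a smaller value than previously reported so
 * the counter stays monotonic from userspace's point of view.
 */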
static u64 get_rc6(struct drm_i915_private *i915, bool locked)
{
#if IS_ENABLED(CONFIG_PM)
	unsigned long flags;
	u64 val;

	if (intel_runtime_pm_get_if_in_use(i915)) {
		val = __get_rc6(i915);
		intel_runtime_pm_put(i915);

		/*
		 * If we are coming back from being runtime suspended we must
		 * be careful not to report a larger value than returned
		 * previously.
		 */

		if (!locked)
			spin_lock_irqsave(&i915->pmu.lock, flags);

		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		}

		if (!locked)
			spin_unlock_irqrestore(&i915->pmu.lock, flags);
	} else {
		struct pci_dev *pdev = i915->drm.pdev;
		struct device *kdev = &pdev->dev;
		unsigned long flags2;

		/*
		 * We are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		if (!locked)
			spin_lock_irqsave(&i915->pmu.lock, flags);

		spin_lock_irqsave(&kdev->power.lock, flags2);

		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
			i915->pmu.suspended_jiffies_last =
					      kdev->power.suspended_jiffies;

		val = kdev->power.suspended_jiffies -
		      i915->pmu.suspended_jiffies_last;
		val += jiffies - kdev->power.accounting_timestamp;

		spin_unlock_irqrestore(&kdev->power.lock, flags2);

		val = jiffies_to_nsecs(val);
		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

		if (!locked)
			spin_unlock_irqrestore(&i915->pmu.lock, flags);
	}

	return val;
#else
	return __get_rc6(i915);
#endif
}
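/*
 * __i915_pmu_event_read() returns the current value of a counter. For
 * busy/wait/sema engine events either the software busyness stats or
 * the timer-accumulated sample is used; the other counters are derived
 * on demand. The 'locked' flag indicates whether pmu.lock is already
 * held by the caller.
 */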
static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915, locked);
			break;
		}
	}

	return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event, false);

	/* Retry if another reader raced us and updated prev_count first. */
	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		engine->pmu.enable_count[sample]++;
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
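/*
 * The standard perf PMU callbacks follow: start/stop map onto
 * i915_pmu_enable()/i915_pmu_disable(), while add/del are trivial
 * wrappers since these are free-running counters with no scheduling
 * constraints.
 */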
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}
static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}
static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
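/*
 * Everything below is sysfs plumbing: the format, events and cpumask
 * attribute groups exported under
 * /sys/bus/event_source/devices/i915/, which is what the perf tool
 * parses to resolve symbolic event names into config values.
 */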
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};
static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)
static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};
static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};
static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}
#define I915_EVENT_ATTR(_name, _config) \
	(&((struct i915_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
		  .val = _config, } \
	})[0].attr.attr)
#define I915_EVENT_STR(_name, _str) \
	(&((struct perf_pmu_events_attr[]) { \
		{ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
		  .id = 0, \
		  .event_str = _str, } \
	})[0].attr.attr)
#define I915_EVENT(_name, _config, _unit) \
	I915_EVENT_ATTR(_name, _config), \
	I915_EVENT_STR(_name.unit, _unit)
#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
	I915_EVENT_STR(_name.unit, "ns")
#define I915_ENGINE_EVENTS(_name, _class, _instance) \
	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
static struct attribute *i915_pmu_events_attrs[] = {
	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

	I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),

	I915_EVENT(rc6-residency, I915_PMU_RC6_RESIDENCY, "ns"),

	NULL,
};
static const struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	.attrs = i915_pmu_events_attrs,
};
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}
static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};
static struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};
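/*
 * i915 events are uncore, so they can only be counted from one CPU at a
 * time. The CPU hotplug callbacks below keep i915_pmu_cpumask pointing
 * at a single designated reader CPU and migrate the perf context if
 * that CPU goes offline.
 */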
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}
void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
	i915->pmu.base.event_init	= i915_pmu_event_init;
	i915->pmu.base.add		= i915_pmu_event_add;
	i915->pmu.base.del		= i915_pmu_event_del;
	i915->pmu.base.start		= i915_pmu_event_start;
	i915->pmu.base.stop		= i915_pmu_event_stop;
	i915->pmu.base.read		= i915_pmu_event_read;
	i915->pmu.base.event_idx	= i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}