drivers/gpu/drm/i915/i915_pmu.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
        (BIT(I915_SAMPLE_BUSY) | \
         BIT(I915_SAMPLE_WAIT) | \
         BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;
static u8 engine_config_sample(u64 config)
{
        return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
        return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
        return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
        if (is_engine_config(config))
                return engine_config_sample(config);
        else
                return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
        return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
        return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
        return config_enabled_bit(event->attr.config);
}
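/*
 * Layout of the event config decoded by the helpers above: engine events are
 * packed as (class << I915_PMU_CLASS_SHIFT) |
 * (instance << I915_PMU_SAMPLE_BITS) | sample, as built by __I915_PMU_ENGINE()
 * in the uapi header, while everything at or above __I915_PMU_OTHER(0) is a
 * global counter such as I915_PMU_ACTUAL_FREQUENCY or I915_PMU_RC6_RESIDENCY.
 */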
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
        u64 enable;

        /*
         * Only some counters need the sampling timer.
         *
         * We start with a bitmask of all currently enabled events.
         */
        enable = i915->pmu.enable;

        /*
         * Mask out all the ones which do not need the timer, or in
         * other words keep all the ones that could need the timer.
         */
        enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
                  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
                  ENGINE_SAMPLE_MASK;

        /*
         * When the GPU is idle per-engine counters do not need to be
         * running so clear those bits out.
         */
        if (!gpu_active)
                enable &= ~ENGINE_SAMPLE_MASK;
        /*
         * Also, if software busyness tracking is available we do not need
         * the timer for the I915_SAMPLE_BUSY counter.
         *
         * Use RCS as proxy for all engines.
         */
        else if (intel_engine_supports_stats(i915->engine[RCS]))
                enable &= ~BIT(I915_SAMPLE_BUSY);

        /*
         * If some bits remain it means we need the sampling timer running.
         */
        return enable;
}
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        spin_lock_irq(&i915->pmu.lock);
        /*
         * Signal sampling timer to stop if only engine events are enabled and
         * GPU went idle.
         */
        i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
        spin_unlock_irq(&i915->pmu.lock);
}
static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
        if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
                i915->pmu.timer_enabled = true;
                hrtimer_start_range_ns(&i915->pmu.timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
        }
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        spin_lock_irq(&i915->pmu.lock);
        /*
         * Re-enable sampling timer when GPU goes active.
         */
        __i915_pmu_maybe_start_timer(i915);
        spin_unlock_irq(&i915->pmu.lock);
}
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
        if (!fw)
                intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

        return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
        sample->cur += mul_u32_u32(val, unit);
}
static void engines_sample(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        bool fw = false;

        if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
                return;

        if (!dev_priv->gt.awake)
                return;

        if (!intel_runtime_pm_get_if_in_use(dev_priv))
                return;

        for_each_engine(engine, dev_priv, id) {
                u32 current_seqno = intel_engine_get_seqno(engine);
                u32 last_seqno = intel_engine_last_submit(engine);
                u32 val;

                val = !i915_seqno_passed(current_seqno, last_seqno);

                update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
                              PERIOD, val);

                if (val && (engine->pmu.enable &
                    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
                        fw = grab_forcewake(dev_priv, fw);

                        val = I915_READ_FW(RING_CTL(engine->mmio_base));
                } else {
                        val = 0;
                }

                update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
                              PERIOD, !!(val & RING_WAIT));

                update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
                              PERIOD, !!(val & RING_WAIT_SEMAPHORE));
        }

        if (fw)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

        intel_runtime_pm_put(dev_priv);
}
static void frequency_sample(struct drm_i915_private *dev_priv)
{
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
                u32 val;

                val = dev_priv->gt_pm.rps.cur_freq;
                if (dev_priv->gt.awake &&
                    intel_runtime_pm_get_if_in_use(dev_priv)) {
                        val = intel_get_cagf(dev_priv,
                                             I915_READ_NOTRACE(GEN6_RPSTAT1));
                        intel_runtime_pm_put(dev_priv);
                }

                update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
                              1, intel_gpu_freq(dev_priv, val));
        }

        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
                update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
                              intel_gpu_freq(dev_priv,
                                             dev_priv->gt_pm.rps.cur_freq));
        }
}
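/*
 * Note on the frequency samples: each tick above adds the current frequency
 * (in MHz) to the sample exactly once, so over one second roughly FREQUENCY
 * values accumulate. __i915_pmu_event_read() later divides by FREQUENCY,
 * which makes the exposed counters advance by approximately the average
 * frequency in MHz for every second of wall time.
 */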
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);

        if (!READ_ONCE(i915->pmu.timer_enabled))
                return HRTIMER_NORESTART;

        engines_sample(i915);
        frequency_sample(i915);

        hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
        return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
        /* open-coded kstat_irqs() */
        struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
        u64 sum = 0;
        int cpu;

        if (!desc || !desc->kstat_irqs)
                return 0;

        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

        return sum;
}
static void engine_event_destroy(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;

        engine = intel_engine_lookup_user(i915,
                                          engine_event_class(event),
                                          engine_event_instance(event));
        if (WARN_ON_ONCE(!engine))
                return;

        if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
            intel_engine_supports_stats(engine))
                intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
        WARN_ON(event->parent);

        if (is_engine_event(event))
                engine_event_destroy(event);
}
static int
engine_event_status(struct intel_engine_cs *engine,
                    enum drm_i915_pmu_engine_sample sample)
{
        switch (sample) {
        case I915_SAMPLE_BUSY:
        case I915_SAMPLE_WAIT:
                break;
        case I915_SAMPLE_SEMA:
                if (INTEL_GEN(engine->i915) < 6)
                        return -ENODEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}
static int engine_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;
        u8 sample;
        int ret;

        engine = intel_engine_lookup_user(i915, engine_event_class(event),
                                          engine_event_instance(event));
        if (!engine)
                return -ENODEV;

        sample = engine_event_sample(event);
        ret = engine_event_status(engine, sample);
        if (ret)
                return ret;

        if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
                ret = intel_enable_engine_stats(engine);

        return ret;
}
static int i915_pmu_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (event->cpu < 0)
                return -EINVAL;

        /* only allow running on one cpu at a time */
        if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
                return -EINVAL;

        if (is_engine_event(event)) {
                ret = engine_event_init(event);
        } else {
                ret = 0;
                switch (event->attr.config) {
                case I915_PMU_ACTUAL_FREQUENCY:
                        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                                /* Requires a mutex for sampling! */
                                ret = -ENODEV;
                        /* Fall-through: the gen check below applies too. */
                case I915_PMU_REQUESTED_FREQUENCY:
                        if (INTEL_GEN(i915) < 6)
                                ret = -ENODEV;
                        break;
                case I915_PMU_INTERRUPTS:
                        break;
                case I915_PMU_RC6_RESIDENCY:
                        if (!HAS_RC6(i915))
                                ret = -ENODEV;
                        break;
                default:
                        ret = -ENOENT;
                        break;
                }
        }

        if (ret)
                return ret;

        if (!event->parent)
                event->destroy = i915_pmu_event_destroy;

        return 0;
}
static u64 __get_rc6(struct drm_i915_private *i915)
{
        u64 val;

        val = intel_rc6_residency_ns(i915,
                                     IS_VALLEYVIEW(i915) ?
                                     VLV_GT_RENDER_RC6 :
                                     GEN6_GT_GFX_RC6);

        if (HAS_RC6p(i915))
                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

        if (HAS_RC6pp(i915))
                val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

        return val;
}
static u64 get_rc6(struct drm_i915_private *i915, bool locked)
{
#if IS_ENABLED(CONFIG_PM)
        unsigned long flags;
        u64 val;

        if (intel_runtime_pm_get_if_in_use(i915)) {
                val = __get_rc6(i915);
                intel_runtime_pm_put(i915);

                /*
                 * If we are coming back from being runtime suspended we must
                 * be careful not to report a larger value than returned
                 * previously.
                 */
                if (!locked)
                        spin_lock_irqsave(&i915->pmu.lock, flags);

                if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
                        i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
                        i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
                } else {
                        val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
                }

                if (!locked)
                        spin_unlock_irqrestore(&i915->pmu.lock, flags);
        } else {
                struct pci_dev *pdev = i915->drm.pdev;
                struct device *kdev = &pdev->dev;
                unsigned long flags2;

                /*
                 * We are runtime suspended.
                 *
                 * Report the delta from when the device was suspended to now,
                 * on top of the last known real value, as the approximated RC6
                 * counter value.
                 */
                if (!locked)
                        spin_lock_irqsave(&i915->pmu.lock, flags);

                spin_lock_irqsave(&kdev->power.lock, flags2);

                if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
                        i915->pmu.suspended_jiffies_last =
                                                kdev->power.suspended_jiffies;

                val = kdev->power.suspended_jiffies -
                      i915->pmu.suspended_jiffies_last;
                val += jiffies - kdev->power.accounting_timestamp;

                spin_unlock_irqrestore(&kdev->power.lock, flags2);

                val = jiffies_to_nsecs(val);
                val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
                i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

                if (!locked)
                        spin_unlock_irqrestore(&i915->pmu.lock, flags);
        }

        return val;
#else
        return __get_rc6(i915);
#endif
}
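/*
 * In short: while runtime suspended the RC6 counter is estimated as the time
 * spent suspended on top of the last hardware reading, and once the hardware
 * counter catches up with that estimate real values are reported again. The
 * estimate is never allowed to go backwards, which keeps the event monotonic
 * across runtime suspend cycles.
 */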
static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        u64 val = 0;

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));

                if (WARN_ON_ONCE(!engine)) {
                        /* Do nothing */
                } else if (sample == I915_SAMPLE_BUSY &&
                           intel_engine_supports_stats(engine)) {
                        val = ktime_to_ns(intel_engine_get_busy_time(engine));
                } else {
                        val = engine->pmu.sample[sample].cur;
                }
        } else {
                switch (event->attr.config) {
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
                                   FREQUENCY);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
                                   FREQUENCY);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
                        break;
                case I915_PMU_RC6_RESIDENCY:
                        val = get_rc6(i915, locked);
                        break;
                }
        }

        return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;

again:
        prev = local64_read(&hwc->prev_count);
        new = __i915_pmu_event_read(event, false);

        if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
                goto again;

        local64_add(new - prev, &event->count);
}
static void i915_pmu_enable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        unsigned long flags;

        spin_lock_irqsave(&i915->pmu.lock, flags);

        /*
         * Update the bitmask of enabled events and increment
         * the event reference counter.
         */
        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
        GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
        i915->pmu.enable |= BIT_ULL(bit);
        i915->pmu.enable_count[bit]++;

        /*
         * Start the sampling timer if needed and not already enabled.
         */
        __i915_pmu_maybe_start_timer(i915);

        /*
         * For per-engine events the bitmask and reference counting
         * is stored per engine.
         */
        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
                GEM_BUG_ON(!engine);
                engine->pmu.enable |= BIT(sample);

                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
                engine->pmu.enable_count[sample]++;
        }

        /*
         * Store the current counter value so we can report the correct delta
         * for all listeners. Even when the event was already enabled and has
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));

        spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void i915_pmu_disable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        unsigned long flags;

        spin_lock_irqsave(&i915->pmu.lock, flags);

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
                GEM_BUG_ON(!engine);
                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
                GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
                /*
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
                 */
                if (--engine->pmu.enable_count[sample] == 0)
                        engine->pmu.enable &= ~BIT(sample);
        }

        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
        GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
        /*
         * Decrement the reference count and clear the enabled
         * bitmask when the last listener on an event goes away.
         */
        if (--i915->pmu.enable_count[bit] == 0) {
                i915->pmu.enable &= ~BIT_ULL(bit);
                i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
        }

        spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
        i915_pmu_enable(event);
        event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_UPDATE)
                i915_pmu_event_read(event);
        i915_pmu_disable(event);
        event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                i915_pmu_event_start(event, flags);

        return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
        i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
        return 0;
}
struct i915_str_attribute {
        struct device_attribute attr;
        const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct i915_str_attribute *eattr;

        eattr = container_of(attr, struct i915_str_attribute, attr);
        return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
        (&((struct i915_str_attribute[]) { \
                { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
                  .str = _config, } \
        })[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
        I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
        NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
        .name = "format",
        .attrs = i915_pmu_format_attrs,
};
struct i915_ext_attribute {
        struct device_attribute attr;
        unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct i915_ext_attribute *eattr;

        eattr = container_of(attr, struct i915_ext_attribute, attr);
        return sprintf(buf, "config=0x%lx\n", eattr->val);
}

#define I915_EVENT_ATTR(_name, _config) \
        (&((struct i915_ext_attribute[]) { \
                { .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
                  .val = _config, } \
        })[0].attr.attr)

#define I915_EVENT_STR(_name, _str) \
        (&((struct perf_pmu_events_attr[]) { \
                { .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
                  .id = 0, \
                  .event_str = _str, } \
        })[0].attr.attr)

#define I915_EVENT(_name, _config, _unit) \
        I915_EVENT_ATTR(_name, _config), \
        I915_EVENT_STR(_name.unit, _unit)

#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
        I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
        I915_EVENT_STR(_name.unit, "ns")

#define I915_ENGINE_EVENTS(_name, _class, _instance) \
        I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
        I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
        I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)

static struct attribute *i915_pmu_events_attrs[] = {
        I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
        I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
        I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
        I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
        I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

        I915_EVENT(actual-frequency,    I915_PMU_ACTUAL_FREQUENCY,    "MHz"),
        I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

        I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),

        I915_EVENT(rc6-residency, I915_PMU_RC6_RESIDENCY, "ns"),

        NULL,
};

static const struct attribute_group i915_pmu_events_attr_group = {
        .name = "events",
        .attrs = i915_pmu_events_attrs,
};
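/*
 * The attribute groups above are exported via the perf core, so the counters
 * appear under /sys/bus/event_source/devices/i915/ and can be read with
 * standard tooling, for example:
 *
 *   perf stat -a -I 1000 -e i915/rcs0-busy/,i915/rc6-residency/
 *
 * All events are system-wide (task_ctx_nr is set to perf_invalid_context
 * below) and counting-only; sampling is rejected in i915_pmu_event_init().
 */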
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group i915_pmu_cpumask_attr_group = {
        .attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
        &i915_pmu_format_attr_group,
        &i915_pmu_events_attr_group,
        &i915_pmu_cpumask_attr_group,
        NULL
};
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

        GEM_BUG_ON(!pmu->base.event_init);

        /* Select the first online CPU as a designated reader. */
        if (!cpumask_weight(&i915_pmu_cpumask))
                cpumask_set_cpu(cpu, &i915_pmu_cpumask);

        return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
        unsigned int target;

        GEM_BUG_ON(!pmu->base.event_init);

        if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &i915_pmu_cpumask);
                        perf_pmu_migrate_context(&pmu->base, cpu, target);
                }
        }

        return 0;
}
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
        enum cpuhp_state slot;
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "perf/x86/intel/i915:online",
                                      i915_pmu_cpu_online,
                                      i915_pmu_cpu_offline);
        if (ret < 0)
                return ret;

        slot = ret;
        ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
        if (ret) {
                cpuhp_remove_multi_state(slot);
                return ret;
        }

        cpuhp_slot = slot;
        return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
        WARN_ON(cpuhp_slot == CPUHP_INVALID);
        WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
        cpuhp_remove_multi_state(cpuhp_slot);
}
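/*
 * i915_pmu_register() and i915_pmu_unregister() below are expected to be
 * called once per device from the driver load and unload paths. Failure to
 * register the PMU is not fatal to driver load; it only leaves event_init
 * unset, which the rest of this file uses as the "PMU is absent" marker.
 */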
void i915_pmu_register(struct drm_i915_private *i915)
{
        int ret;

        if (INTEL_GEN(i915) <= 2) {
                DRM_INFO("PMU not supported for this GPU.\n");
                return;
        }

        i915->pmu.base.attr_groups  = i915_pmu_attr_groups;
        i915->pmu.base.task_ctx_nr  = perf_invalid_context;
        i915->pmu.base.event_init   = i915_pmu_event_init;
        i915->pmu.base.add          = i915_pmu_event_add;
        i915->pmu.base.del          = i915_pmu_event_del;
        i915->pmu.base.start        = i915_pmu_event_start;
        i915->pmu.base.stop         = i915_pmu_event_stop;
        i915->pmu.base.read         = i915_pmu_event_read;
        i915->pmu.base.event_idx    = i915_pmu_event_event_idx;

        spin_lock_init(&i915->pmu.lock);
        hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        i915->pmu.timer.function = i915_sample;

        ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
        if (ret)
                goto err;

        ret = i915_pmu_register_cpuhp_state(i915);
        if (ret)
                goto err_unreg;

        return;

err_unreg:
        perf_pmu_unregister(&i915->pmu.base);
err:
        i915->pmu.base.event_init = NULL;
        DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
        if (!i915->pmu.base.event_init)
                return;

        WARN_ON(i915->pmu.enable);

        hrtimer_cancel(&i915->pmu.timer);

        i915_pmu_unregister_cpuhp_state(i915);

        perf_pmu_unregister(&i915->pmu.base);
        i915->pmu.base.event_init = NULL;
}