drivers/perf/fsl_imx8_ddr_perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */

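/*
 * Note that DDR_CAP_AXI_ID_FILTER_ENHANCED (0x3) includes the plain
 * DDR_CAP_AXI_ID_FILTER bit (0x1): a core with the enhanced filter also
 * supports the basic one. This is why the capability checks below test
 * (quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) == DDR_CAP_AXI_ID_FILTER_ENHANCED
 * rather than a single bit.
 */
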
struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};

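/*
 * The "caps" group is exported under the PMU's sysfs node, so userspace
 * can probe for filtering support before relying on it. An illustrative
 * check (the instance name imx8_ddr0 is an assumption: the suffix is the
 * IDA id assigned at probe time, and the value depends on the SoC):
 *
 *   $ cat /sys/bus/event_source/devices/imx8_ddr0/caps/filter
 *   1
 */
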
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	NULL,
};

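/*
 * Together the groups above form the perf userspace interface. A sketch
 * of a filtered measurement, assuming an instance named imx8_ddr0 and a
 * purely illustrative AXI ID of 0x12:
 *
 *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x12/ cmd
 *
 * Per the format attributes, axi_id occupies config1 bits 0-15 and
 * axi_mask occupies config1 bits 16-31.
 */
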
static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter
	 * is dedicated to the cycle event and can't be used for any
	 * other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

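/*
 * Resulting counter layout: counter 0 (EVENT_CYCLES_COUNTER) is
 * reserved for the cycles event, while counters 1..3 are handed out
 * first-come first-served to all other events.
 */
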
static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transaction for
	 * the axid-read and axid-write events if the PMU core supports
	 * the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	/* The hardware counters are 32 bits wide, so mask the delta */
	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				  int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The counter must be disabled first and then enabled
		 * again; otherwise the cycle counter will not work if
		 * its previous state was enabled.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		writel(0, pmu->base + reg);
	}
}

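/*
 * Per-counter CNTL register layout as used above, derived from the
 * CNTL_* defines (other bits are left untouched here):
 *
 *   bit  0      OVER  - overflow flag
 *   bit  1      CLEAR - clear/reset the counter
 *   bit  2      EN    - counter enable
 *   bits 24-31  CSV   - event (config) selected for the counter
 */
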
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert the AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Enable the cycle counter if the cycles event is not in the active event list */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      false);
}

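/*
 * The two callbacks above keep the cycle counter running across
 * pmu_disable/pmu_enable sections when no explicit cycles event is
 * active; as noted in the IRQ handler below, disabling the cycle
 * counter stops all the other counters too.
 */
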
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* All counters will stop if the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {

		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      true);
	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

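/*
 * CPU hotplug callback: if the CPU currently servicing this uncore PMU
 * goes offline, migrate the perf context and the IRQ affinity to any
 * other online CPU so counting continues.
 */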
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name)
		return -ENOMEM;

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");