drivers/devfreq/tegra30-devfreq.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * A devfreq driver for NVIDIA Tegra SoCs
5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
6 * Copyright (C) 2014 Google, Inc
7 */
9 #include <linux/clk.h>
10 #include <linux/cpufreq.h>
11 #include <linux/devfreq.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/irq.h>
15 #include <linux/module.h>
16 #include <linux/of_device.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_opp.h>
19 #include <linux/reset.h>
20 #include <linux/workqueue.h>
22 #include "governor.h"
24 #define ACTMON_GLB_STATUS 0x0
25 #define ACTMON_GLB_PERIOD_CTRL 0x4
27 #define ACTMON_DEV_CTRL 0x0
28 #define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
29 #define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
30 #define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
31 #define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
32 #define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
33 #define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
34 #define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
35 #define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
36 #define ACTMON_DEV_CTRL_ENB BIT(31)
38 #define ACTMON_DEV_CTRL_STOP 0x00000000
40 #define ACTMON_DEV_UPPER_WMARK 0x4
41 #define ACTMON_DEV_LOWER_WMARK 0x8
42 #define ACTMON_DEV_INIT_AVG 0xc
43 #define ACTMON_DEV_AVG_UPPER_WMARK 0x10
44 #define ACTMON_DEV_AVG_LOWER_WMARK 0x14
45 #define ACTMON_DEV_COUNT_WEIGHT 0x18
46 #define ACTMON_DEV_AVG_COUNT 0x20
47 #define ACTMON_DEV_INTR_STATUS 0x24
49 #define ACTMON_INTR_STATUS_CLEAR 0xffffffff
51 #define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
52 #define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
54 #define ACTMON_ABOVE_WMARK_WINDOW 1
55 #define ACTMON_BELOW_WMARK_WINDOW 3
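/*
 * Boost step applied on a consecutive upper-watermark breach, in kHz
 * (16 MHz), matching the kHz units used throughout this driver.
 */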
56 #define ACTMON_BOOST_FREQ_STEP 16000
59 * Activity counter is incremented every 256 memory transactions, and each
60 * transaction takes 4 EMC clocks for Tegra124, so the COUNT_WEIGHT is
61 * 4 * 256 = 1024.
63 #define ACTMON_COUNT_WEIGHT 0x400
66 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
67 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
69 #define ACTMON_AVERAGE_WINDOW_LOG2 6
70 #define ACTMON_SAMPLING_PERIOD 12 /* ms */
71 #define ACTMON_DEFAULT_AVG_BAND 6 /* in tenths of a percent, i.e. 0.6% */
73 #define KHZ 1000
75 #define KHZ_MAX (ULONG_MAX / KHZ)
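/*
 * KHZ_MAX serves as an "unlimited" EMC rate in the CPU/EMC ratio table
 * below; actmon_cpu_to_emc_rate() clamps it to the real max_freq.
 */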
77 /* Assume that the bus is saturated if the utilization is 25% */
78 #define BUS_SATURATION_RATIO 25
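/*
 * Applied in tegra_devfreq_get_dev_status(): the measured busy cycles are
 * scaled by 100 / BUS_SATURATION_RATIO (4x), so 25% bus utilization is
 * already reported to the governor as fully busy.
 */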
80 /**
81 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
82 * device
84 * Coefficients and thresholds are percentages unless otherwise noted
86 struct tegra_devfreq_device_config {
87 u32 offset;
88 u32 irq_mask;
90 /* Factors applied to boost_freq every consecutive watermark breach */
91 unsigned int boost_up_coeff;
92 unsigned int boost_down_coeff;
94 /* Define the watermark bounds when applied to the current avg */
95 unsigned int boost_up_threshold;
96 unsigned int boost_down_threshold;
99 * Threshold of activity (cycles translated to kHz) below which the
100 * CPU frequency isn't to be taken into account. This is to avoid
101 * increasing the EMC frequency when the CPU is very busy but not
102 * accessing the bus often.
104 u32 avg_dependency_threshold;
107 enum tegra_actmon_device {
108 MCALL = 0,
109 MCCPU,
112 static const struct tegra_devfreq_device_config actmon_device_configs[] = {
114 /* MCALL: All memory accesses (including from the CPUs) */
115 .offset = 0x1c0,
116 .irq_mask = 1 << 26,
117 .boost_up_coeff = 200,
118 .boost_down_coeff = 50,
119 .boost_up_threshold = 60,
120 .boost_down_threshold = 40,
123 /* MCCPU: memory accesses from the CPUs */
124 .offset = 0x200,
125 .irq_mask = 1 << 25,
126 .boost_up_coeff = 800,
127 .boost_down_coeff = 40,
128 .boost_up_threshold = 27,
129 .boost_down_threshold = 10,
130 .avg_dependency_threshold = 16000, /* 16MHz in kHz units */
135 * struct tegra_devfreq_device - state specific to an ACTMON device
137 * Frequencies are in kHz.
139 struct tegra_devfreq_device {
140 const struct tegra_devfreq_device_config *config;
141 void __iomem *regs;
143 /* Average event count sampled in the last interrupt */
144 u32 avg_count;
147 * Extra frequency to increase the target by due to consecutive
148 * watermark breaches.
150 unsigned long boost_freq;
152 /* Optimal frequency calculated from the stats for this device */
153 unsigned long target_freq;
156 struct tegra_devfreq {
157 struct devfreq *devfreq;
159 struct reset_control *reset;
160 struct clk *clock;
161 void __iomem *regs;
163 struct clk *emc_clock;
164 unsigned long max_freq;
165 unsigned long cur_freq;
166 struct notifier_block clk_rate_change_nb;
168 struct delayed_work cpufreq_update_work;
169 struct notifier_block cpu_rate_change_nb;
171 struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
173 unsigned int irq;
175 bool started;
178 struct tegra_actmon_emc_ratio {
179 unsigned long cpu_freq;
180 unsigned long emc_freq;
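/*
 * Static CPU-to-EMC frequency floor, in kHz. actmon_cpu_to_emc_rate()
 * walks this table from the top and returns the first entry whose cpu_freq
 * does not exceed the current CPU rate, so the rows must stay sorted by
 * descending cpu_freq.
 */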
183 static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
184 { 1400000, KHZ_MAX },
185 { 1200000, 750000 },
186 { 1100000, 600000 },
187 { 1000000, 500000 },
188 { 800000, 375000 },
189 { 500000, 200000 },
190 { 250000, 100000 },
193 static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
195 return readl_relaxed(tegra->regs + offset);
198 static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
200 writel_relaxed(val, tegra->regs + offset);
203 static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
205 return readl_relaxed(dev->regs + offset);
208 static void device_writel(struct tegra_devfreq_device *dev, u32 val,
209 u32 offset)
211 writel_relaxed(val, dev->regs + offset);
214 static unsigned long do_percent(unsigned long long val, unsigned int pct)
216 val = val * pct;
217 do_div(val, 100);
220 * A high frequency, a high boosting percentage and a long polling interval
221 * can result in an integer overflow when the watermarks are calculated.
223 return min_t(u64, val, U32_MAX);
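/*
 * The AVG watermarks form a band of (ACTMON_DEFAULT_AVG_BAND / 1000 of
 * max_freq) * polling_ms counts around the last sampled avg_count; the
 * AVG above/below interrupts fire when the average leaves this band.
 */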
226 static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
227 struct tegra_devfreq_device *dev)
229 u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
230 u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
231 u32 avg;
233 avg = min(dev->avg_count, U32_MAX - band);
234 device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
236 avg = max(dev->avg_count, band);
237 device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
240 static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
241 struct tegra_devfreq_device *dev)
243 u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
245 device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
246 ACTMON_DEV_UPPER_WMARK);
248 device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
249 ACTMON_DEV_LOWER_WMARK);
252 static void actmon_isr_device(struct tegra_devfreq *tegra,
253 struct tegra_devfreq_device *dev)
255 u32 intr_status, dev_ctrl;
257 dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
258 tegra_devfreq_update_avg_wmark(tegra, dev);
260 intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
261 dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
263 if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
265 * new_boost = min(old_boost * up_coef + step, max_freq)
267 dev->boost_freq = do_percent(dev->boost_freq,
268 dev->config->boost_up_coeff);
269 dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
271 dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
273 if (dev->boost_freq >= tegra->max_freq) {
274 dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
275 dev->boost_freq = tegra->max_freq;
277 } else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
279 * new_boost = old_boost * down_coef
280 * or 0 if (old_boost * down_coef < step / 2)
282 dev->boost_freq = do_percent(dev->boost_freq,
283 dev->config->boost_down_coeff);
285 dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
287 if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
288 dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
289 dev->boost_freq = 0;
293 device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
295 device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
298 static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
299 unsigned long cpu_freq)
301 unsigned int i;
302 const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
304 for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
305 if (cpu_freq >= ratio->cpu_freq) {
306 if (ratio->emc_freq >= tegra->max_freq)
307 return tegra->max_freq;
308 else
309 return ratio->emc_freq;
313 return 0;
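/*
 * avg_count holds averaged busy cycles per sampling period, so dividing by
 * polling_ms gives an activity rate in kHz. Scaling it by
 * 100 / boost_up_threshold (e.g. ~166% for MCALL's 60% threshold) picks a
 * frequency at which that activity would sit right at the boost-up mark.
 */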
316 static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
317 struct tegra_devfreq_device *dev)
319 unsigned int avg_sustain_coef;
320 unsigned long target_freq;
322 target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
323 avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
324 target_freq = do_percent(target_freq, avg_sustain_coef);
326 return target_freq;
329 static void actmon_update_target(struct tegra_devfreq *tegra,
330 struct tegra_devfreq_device *dev)
332 unsigned long cpu_freq = 0;
333 unsigned long static_cpu_emc_freq = 0;
335 dev->target_freq = actmon_device_target_freq(tegra, dev);
337 if (dev->config->avg_dependency_threshold &&
338 dev->config->avg_dependency_threshold <= dev->target_freq) {
339 cpu_freq = cpufreq_quick_get(0);
340 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
342 dev->target_freq += dev->boost_freq;
343 dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
344 } else {
345 dev->target_freq += dev->boost_freq;
349 static irqreturn_t actmon_thread_isr(int irq, void *data)
351 struct tegra_devfreq *tegra = data;
352 bool handled = false;
353 unsigned int i;
354 u32 val;
356 mutex_lock(&tegra->devfreq->lock);
358 val = actmon_readl(tegra, ACTMON_GLB_STATUS);
359 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
360 if (val & tegra->devices[i].config->irq_mask) {
361 actmon_isr_device(tegra, tegra->devices + i);
362 handled = true;
366 if (handled)
367 update_devfreq(tegra->devfreq);
369 mutex_unlock(&tegra->devfreq->lock);
371 return handled ? IRQ_HANDLED : IRQ_NONE;
374 static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
375 unsigned long action, void *ptr)
377 struct clk_notifier_data *data = ptr;
378 struct tegra_devfreq *tegra;
379 struct tegra_devfreq_device *dev;
380 unsigned int i;
382 if (action != POST_RATE_CHANGE)
383 return NOTIFY_OK;
385 tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
387 tegra->cur_freq = data->new_rate / KHZ;
389 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
390 dev = &tegra->devices[i];
392 tegra_devfreq_update_wmark(tegra, dev);
395 return NOTIFY_OK;
398 static void tegra_actmon_delayed_update(struct work_struct *work)
400 struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
401 cpufreq_update_work.work);
403 mutex_lock(&tegra->devfreq->lock);
404 update_devfreq(tegra->devfreq);
405 mutex_unlock(&tegra->devfreq->lock);
408 static unsigned long
409 tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
410 unsigned int cpu_freq)
412 struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
413 unsigned long static_cpu_emc_freq, dev_freq;
415 dev_freq = actmon_device_target_freq(tegra, actmon_dev);
417 /* check whether CPU's freq is taken into account at all */
418 if (dev_freq < actmon_dev->config->avg_dependency_threshold)
419 return 0;
421 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
423 if (dev_freq >= static_cpu_emc_freq)
424 return 0;
426 return static_cpu_emc_freq;
429 static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
430 unsigned long action, void *ptr)
432 struct cpufreq_freqs *freqs = ptr;
433 struct tegra_devfreq *tegra;
434 unsigned long old, new, delay;
436 if (action != CPUFREQ_POSTCHANGE)
437 return NOTIFY_OK;
439 tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
442 * Quickly check whether CPU frequency should be taken into account
443 * at all, without blocking CPUFreq's core.
445 if (mutex_trylock(&tegra->devfreq->lock)) {
446 old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
447 new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
448 mutex_unlock(&tegra->devfreq->lock);
451 * If the CPU's frequency shouldn't be taken into account at
452 * the moment, then there is no need to update devfreq's
453 * state because the ISR will re-check the CPU's frequency on
454 * the next interrupt.
456 if (old == new)
457 return NOTIFY_OK;
461 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
462 * to allow asynchronous notifications. This means we can't block
463 * here for too long, otherwise CPUFreq's core will complain with a
464 * warning splat.
466 delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
467 schedule_delayed_work(&tegra->cpufreq_update_work, delay);
469 return NOTIFY_OK;
472 static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
473 struct tegra_devfreq_device *dev)
475 u32 val = 0;
477 /* reset boosting on governor's restart */
478 dev->boost_freq = 0;
480 dev->target_freq = tegra->cur_freq;
482 dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
483 device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
485 tegra_devfreq_update_avg_wmark(tegra, dev);
486 tegra_devfreq_update_wmark(tegra, dev);
488 device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
489 device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
491 val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
492 val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
493 << ACTMON_DEV_CTRL_K_VAL_SHIFT;
494 val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
495 << ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
496 val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
497 << ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
498 val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
499 val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
500 val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
501 val |= ACTMON_DEV_CTRL_ENB;
503 device_writel(dev, val, ACTMON_DEV_CTRL);
506 static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
508 struct tegra_devfreq_device *dev = tegra->devices;
509 unsigned int i;
511 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
512 device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
513 device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
514 ACTMON_DEV_INTR_STATUS);
518 static int tegra_actmon_resume(struct tegra_devfreq *tegra)
520 unsigned int i;
521 int err;
523 if (!tegra->devfreq->profile->polling_ms || !tegra->started)
524 return 0;
526 actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
527 ACTMON_GLB_PERIOD_CTRL);
530 * CLK notifications are needed in order to reconfigure the upper
531 * consecutive watermark in accordance with the actual clock rate
532 * to avoid unnecessary upper interrupts.
534 err = clk_notifier_register(tegra->emc_clock,
535 &tegra->clk_rate_change_nb);
536 if (err) {
537 dev_err(tegra->devfreq->dev.parent,
538 "Failed to register rate change notifier\n");
539 return err;
542 tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
544 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
545 tegra_actmon_configure_device(tegra, &tegra->devices[i]);
548 * We estimate the CPU's memory bandwidth requirement based on the
549 * number of memory accesses and the system's load, judging by the
550 * CPU's frequency. We also don't want to receive events about CPU
551 * frequency transitions while the governor is stopped, hence the
552 * notifier is registered dynamically.
554 err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
555 CPUFREQ_TRANSITION_NOTIFIER);
556 if (err) {
557 dev_err(tegra->devfreq->dev.parent,
558 "Failed to register rate change notifier: %d\n", err);
559 goto err_stop;
562 enable_irq(tegra->irq);
564 return 0;
566 err_stop:
567 tegra_actmon_stop_devices(tegra);
569 clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
571 return err;
574 static int tegra_actmon_start(struct tegra_devfreq *tegra)
576 int ret = 0;
578 if (!tegra->started) {
579 tegra->started = true;
581 ret = tegra_actmon_resume(tegra);
582 if (ret)
583 tegra->started = false;
586 return ret;
589 static void tegra_actmon_pause(struct tegra_devfreq *tegra)
591 if (!tegra->devfreq->profile->polling_ms || !tegra->started)
592 return;
594 disable_irq(tegra->irq);
596 cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
597 CPUFREQ_TRANSITION_NOTIFIER);
599 cancel_delayed_work_sync(&tegra->cpufreq_update_work);
601 tegra_actmon_stop_devices(tegra);
603 clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
606 static void tegra_actmon_stop(struct tegra_devfreq *tegra)
608 tegra_actmon_pause(tegra);
609 tegra->started = false;
612 static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
613 u32 flags)
615 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
616 struct devfreq *devfreq = tegra->devfreq;
617 struct dev_pm_opp *opp;
618 unsigned long rate;
619 int err;
621 opp = devfreq_recommended_opp(dev, freq, flags);
622 if (IS_ERR(opp)) {
623 dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
624 return PTR_ERR(opp);
626 rate = dev_pm_opp_get_freq(opp);
627 dev_pm_opp_put(opp);
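/*
 * Raise the EMC clock's min-rate constraint to the selected OPP and then
 * request rate 0: the clock framework clamps the request to the aggregated
 * minimum, so the EMC settles at the lowest rate satisfying all users.
 */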
629 err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
630 if (err)
631 return err;
633 err = clk_set_rate(tegra->emc_clock, 0);
634 if (err)
635 goto restore_min_rate;
637 return 0;
639 restore_min_rate:
640 clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
642 return err;
645 static int tegra_devfreq_get_dev_status(struct device *dev,
646 struct devfreq_dev_status *stat)
648 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
649 struct tegra_devfreq_device *actmon_dev;
650 unsigned long cur_freq;
652 cur_freq = READ_ONCE(tegra->cur_freq);
654 /* To be used by the tegra governor */
655 stat->private_data = tegra;
657 /* The below are to be used by the other governors */
658 stat->current_frequency = cur_freq;
660 actmon_dev = &tegra->devices[MCALL];
662 /* Number of cycles spent on memory access */
663 stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
665 /* The bus can be considered saturated well before 100% utilization */
666 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
668 /* Number of cycles in a sampling period */
669 stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
671 stat->busy_time = min(stat->busy_time, stat->total_time);
673 return 0;
676 static struct devfreq_dev_profile tegra_devfreq_profile = {
677 .polling_ms = ACTMON_SAMPLING_PERIOD,
678 .target = tegra_devfreq_target,
679 .get_dev_status = tegra_devfreq_get_dev_status,
682 static int tegra_governor_get_target(struct devfreq *devfreq,
683 unsigned long *freq)
685 struct devfreq_dev_status *stat;
686 struct tegra_devfreq *tegra;
687 struct tegra_devfreq_device *dev;
688 unsigned long target_freq = 0;
689 unsigned int i;
690 int err;
692 err = devfreq_update_stats(devfreq);
693 if (err)
694 return err;
696 stat = &devfreq->last_status;
698 tegra = stat->private_data;
700 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
701 dev = &tegra->devices[i];
703 actmon_update_target(tegra, dev);
705 target_freq = max(target_freq, dev->target_freq);
708 *freq = target_freq;
710 return 0;
713 static int tegra_governor_event_handler(struct devfreq *devfreq,
714 unsigned int event, void *data)
716 struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
717 unsigned int *new_delay = data;
718 int ret = 0;
721 * Couple the devfreq device with the governor early because it is
722 * needed at the moment the governor starts (it is used by the ISR).
724 tegra->devfreq = devfreq;
726 switch (event) {
727 case DEVFREQ_GOV_START:
728 devfreq_monitor_start(devfreq);
729 ret = tegra_actmon_start(tegra);
730 break;
732 case DEVFREQ_GOV_STOP:
733 tegra_actmon_stop(tegra);
734 devfreq_monitor_stop(devfreq);
735 break;
737 case DEVFREQ_GOV_INTERVAL:
739 * ACTMON hardware supports up to 256 milliseconds for the
740 * sampling period.
742 if (*new_delay > 256) {
743 ret = -EINVAL;
744 break;
747 tegra_actmon_pause(tegra);
748 devfreq_interval_update(devfreq, new_delay);
749 ret = tegra_actmon_resume(tegra);
750 break;
752 case DEVFREQ_GOV_SUSPEND:
753 tegra_actmon_stop(tegra);
754 devfreq_monitor_suspend(devfreq);
755 break;
757 case DEVFREQ_GOV_RESUME:
758 devfreq_monitor_resume(devfreq);
759 ret = tegra_actmon_start(tegra);
760 break;
763 return ret;
766 static struct devfreq_governor tegra_devfreq_governor = {
767 .name = "tegra_actmon",
768 .get_target_freq = tegra_governor_get_target,
769 .event_handler = tegra_governor_event_handler,
770 .immutable = true,
771 .interrupt_driven = true,
774 static int tegra_devfreq_probe(struct platform_device *pdev)
776 struct tegra_devfreq_device *dev;
777 struct tegra_devfreq *tegra;
778 struct devfreq *devfreq;
779 unsigned int i;
780 long rate;
781 int err;
783 tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
784 if (!tegra)
785 return -ENOMEM;
787 tegra->regs = devm_platform_ioremap_resource(pdev, 0);
788 if (IS_ERR(tegra->regs))
789 return PTR_ERR(tegra->regs);
791 tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
792 if (IS_ERR(tegra->reset)) {
793 dev_err(&pdev->dev, "Failed to get reset\n");
794 return PTR_ERR(tegra->reset);
797 tegra->clock = devm_clk_get(&pdev->dev, "actmon");
798 if (IS_ERR(tegra->clock)) {
799 dev_err(&pdev->dev, "Failed to get actmon clock\n");
800 return PTR_ERR(tegra->clock);
803 tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
804 if (IS_ERR(tegra->emc_clock)) {
805 dev_err(&pdev->dev, "Failed to get emc clock\n");
806 return PTR_ERR(tegra->emc_clock);
809 err = platform_get_irq(pdev, 0);
810 if (err < 0) {
811 dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
812 return err;
814 tegra->irq = err;
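/*
 * Keep the interrupt disabled until the governor is started: it is enabled
 * by tegra_actmon_resume() and disabled again by tegra_actmon_pause().
 */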
816 irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
818 err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
819 actmon_thread_isr, IRQF_ONESHOT,
820 "tegra-devfreq", tegra);
821 if (err) {
822 dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
823 return err;
826 reset_control_assert(tegra->reset);
828 err = clk_prepare_enable(tegra->clock);
829 if (err) {
830 dev_err(&pdev->dev,
831 "Failed to prepare and enable ACTMON clock\n");
832 return err;
835 reset_control_deassert(tegra->reset);
837 rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
838 if (rate < 0) {
839 dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
840 return rate;
843 tegra->max_freq = rate / KHZ;
845 for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
846 dev = tegra->devices + i;
847 dev->config = actmon_device_configs + i;
848 dev->regs = tegra->regs + dev->config->offset;
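/*
 * Enumerate the rates supported by the EMC clock: round each candidate
 * rate and step 1 Hz past the result, registering every distinct rate as
 * an OPP (in kHz). This assumes the clock driver rounds requests up to
 * the next supported rate.
 */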
851 for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
852 rate = clk_round_rate(tegra->emc_clock, rate);
854 if (rate < 0) {
855 dev_err(&pdev->dev,
856 "Failed to round clock rate: %ld\n", rate);
857 err = rate;
858 goto remove_opps;
861 err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
862 if (err) {
863 dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
864 goto remove_opps;
868 platform_set_drvdata(pdev, tegra);
870 tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
871 tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
873 INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
874 tegra_actmon_delayed_update);
876 err = devfreq_add_governor(&tegra_devfreq_governor);
877 if (err) {
878 dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
879 goto remove_opps;
882 tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
883 tegra_devfreq_profile.initial_freq /= KHZ;
885 devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
886 "tegra_actmon", NULL);
887 if (IS_ERR(devfreq)) {
888 err = PTR_ERR(devfreq);
889 goto remove_governor;
892 return 0;
894 remove_governor:
895 devfreq_remove_governor(&tegra_devfreq_governor);
897 remove_opps:
898 dev_pm_opp_remove_all_dynamic(&pdev->dev);
900 reset_control_reset(tegra->reset);
901 clk_disable_unprepare(tegra->clock);
903 return err;
906 static int tegra_devfreq_remove(struct platform_device *pdev)
908 struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
910 devfreq_remove_device(tegra->devfreq);
911 devfreq_remove_governor(&tegra_devfreq_governor);
913 dev_pm_opp_remove_all_dynamic(&pdev->dev);
915 reset_control_reset(tegra->reset);
916 clk_disable_unprepare(tegra->clock);
918 return 0;
921 static const struct of_device_id tegra_devfreq_of_match[] = {
922 { .compatible = "nvidia,tegra30-actmon" },
923 { .compatible = "nvidia,tegra124-actmon" },
924 { },
927 MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
929 static struct platform_driver tegra_devfreq_driver = {
930 .probe = tegra_devfreq_probe,
931 .remove = tegra_devfreq_remove,
932 .driver = {
933 .name = "tegra-devfreq",
934 .of_match_table = tegra_devfreq_of_match,
937 module_platform_driver(tegra_devfreq_driver);
939 MODULE_LICENSE("GPL v2");
940 MODULE_DESCRIPTION("Tegra devfreq driver");
941 MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");