/* drivers/soc/rockchip/pm_domains.c */
/*
 * Rockchip Generic power domain support.
 *
 * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>

struct rockchip_domain_info {
	int pwr_mask;
	int status_mask;
	int req_mask;
	int idle_mask;
	int ack_mask;
	bool active_wakeup;
};

struct rockchip_pmu_info {
	u32 pwr_offset;
	u32 status_offset;
	u32 req_offset;
	u32 idle_offset;
	u32 ack_offset;

	u32 core_pwrcnt_offset;
	u32 gpu_pwrcnt_offset;

	unsigned int core_power_transition_time;
	unsigned int gpu_power_transition_time;

	int num_domains;
	const struct rockchip_domain_info *domain_info;
};

#define MAX_QOS_REGS_NUM	5
#define QOS_PRIORITY		0x08
#define QOS_MODE		0x0c
#define QOS_BANDWIDTH		0x10
#define QOS_SATURATION		0x14
#define QOS_EXTCONTROL		0x18

struct rockchip_pm_domain {
	struct generic_pm_domain genpd;
	const struct rockchip_domain_info *info;
	struct rockchip_pmu *pmu;
	int num_qos;
	struct regmap **qos_regmap;
	u32 *qos_save_regs[MAX_QOS_REGS_NUM];
	int num_clks;
	struct clk *clks[];
};

struct rockchip_pmu {
	struct device *dev;
	struct regmap *regmap;
	const struct rockchip_pmu_info *info;
	struct mutex mutex; /* mutex lock for pmu */
	struct genpd_onecell_data genpd_data;
	struct generic_pm_domain *domains[];
};

#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)

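/*
 * A negative index passed to DOMAIN() yields a zero mask. The power, status
 * and idle-request helpers below treat a zero mask as "this SoC has no such
 * control for the domain" and skip the corresponding register access.
 */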
#define DOMAIN(pwr, status, req, idle, ack, wakeup)	\
{							\
	.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0,		\
	.status_mask = (status >= 0) ? BIT(status) : 0,	\
	.req_mask = (req >= 0) ? BIT(req) : 0,		\
	.idle_mask = (idle >= 0) ? BIT(idle) : 0,	\
	.ack_mask = (ack >= 0) ? BIT(ack) : 0,		\
	.active_wakeup = wakeup,			\
}

#define DOMAIN_RK3288(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, req, (req) + 16, wakeup)

#define DOMAIN_RK3368(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, (req) + 16, req, wakeup)

#define DOMAIN_RK3399(pwr, status, req, wakeup)		\
	DOMAIN(pwr, status, req, req, req, wakeup)

static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
	struct rockchip_pmu *pmu = pd->pmu;
	const struct rockchip_domain_info *pd_info = pd->info;
	unsigned int val;

	regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
	return (val & pd_info->idle_mask) == pd_info->idle_mask;
}

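/*
 * Request or release bus idle for the domain: set the per-domain bit in the
 * PMU request register, wait for the matching acknowledge bit, then poll the
 * idle status until it reflects the requested state. Domains without a
 * request bit (req_mask == 0) are left untouched.
 */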
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
					 bool idle)
{
	const struct rockchip_domain_info *pd_info = pd->info;
	struct rockchip_pmu *pmu = pd->pmu;
	unsigned int val;

	if (pd_info->req_mask == 0)
		return 0;

	regmap_update_bits(pmu->regmap, pmu->info->req_offset,
			   pd_info->req_mask, idle ? -1U : 0);

	dsb(sy);

	do {
		regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
	} while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));

	while (rockchip_pmu_domain_is_idle(pd) != idle)
		cpu_relax();

	return 0;
}

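/*
 * The QoS generator registers (priority, mode, bandwidth, saturation and
 * extcontrol) are read out through the per-domain syscon regmaps before the
 * domain is powered down and written back after it is powered up again.
 */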
static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
{
	int i;

	for (i = 0; i < pd->num_qos; i++) {
		regmap_read(pd->qos_regmap[i],
			    QOS_PRIORITY,
			    &pd->qos_save_regs[0][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_MODE,
			    &pd->qos_save_regs[1][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_BANDWIDTH,
			    &pd->qos_save_regs[2][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_SATURATION,
			    &pd->qos_save_regs[3][i]);
		regmap_read(pd->qos_regmap[i],
			    QOS_EXTCONTROL,
			    &pd->qos_save_regs[4][i]);
	}

	return 0;
}

static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
{
	int i;

	for (i = 0; i < pd->num_qos; i++) {
		regmap_write(pd->qos_regmap[i],
			     QOS_PRIORITY,
			     pd->qos_save_regs[0][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_MODE,
			     pd->qos_save_regs[1][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_BANDWIDTH,
			     pd->qos_save_regs[2][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_SATURATION,
			     pd->qos_save_regs[3][i]);
		regmap_write(pd->qos_regmap[i],
			     QOS_EXTCONTROL,
			     pd->qos_save_regs[4][i]);
	}

	return 0;
}

static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
{
	struct rockchip_pmu *pmu = pd->pmu;
	unsigned int val;

	/* check idle status for idle-only domains */
	if (pd->info->status_mask == 0)
		return !rockchip_pmu_domain_is_idle(pd);

	regmap_read(pmu->regmap, pmu->info->status_offset, &val);

	/* 1'b0: power on, 1'b1: power off */
	return !(val & pd->info->status_mask);
}

static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
					     bool on)
{
	struct rockchip_pmu *pmu = pd->pmu;

	if (pd->info->pwr_mask == 0)
		return;

	regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
			   pd->info->pwr_mask, on ? 0 : -1U);

	dsb(sy);

	while (rockchip_pmu_domain_is_on(pd) != on)
		cpu_relax();
}

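/*
 * Power a domain up or down under the PMU mutex. The domain clocks are
 * enabled around the transition; on the way down the QoS registers are saved
 * and a bus idle request is issued first, on the way up the idle request is
 * released and the QoS registers are restored afterwards.
 */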
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
	int i;

	mutex_lock(&pd->pmu->mutex);

	if (rockchip_pmu_domain_is_on(pd) != power_on) {
		for (i = 0; i < pd->num_clks; i++)
			clk_enable(pd->clks[i]);

		if (!power_on) {
			rockchip_pmu_save_qos(pd);

			/* if powering down, idle request to NIU first */
			rockchip_pmu_set_idle_request(pd, true);
		}

		rockchip_do_pmu_set_power_domain(pd, power_on);

		if (power_on) {
			/* if powering up, leave idle mode */
			rockchip_pmu_set_idle_request(pd, false);

			rockchip_pmu_restore_qos(pd);
		}

		for (i = pd->num_clks - 1; i >= 0; i--)
			clk_disable(pd->clks[i]);
	}

	mutex_unlock(&pd->pmu->mutex);
	return 0;
}

static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

	return rockchip_pd_power(pd, true);
}

static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
	struct rockchip_pm_domain *pd = to_rockchip_pd(domain);

	return rockchip_pd_power(pd, false);
}

static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
				  struct device *dev)
{
	struct clk *clk;
	int i;
	int error;

	dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);

	error = pm_clk_create(dev);
	if (error) {
		dev_err(dev, "pm_clk_create failed %d\n", error);
		return error;
	}

	i = 0;
	while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
		dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			clk_put(clk);
			pm_clk_destroy(dev);
			return error;
		}
	}

	return 0;
}

static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
				   struct device *dev)
{
	dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);

	pm_clk_destroy(dev);
}

static bool rockchip_active_wakeup(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct rockchip_pm_domain *pd;

	genpd = pd_to_genpd(dev->pm_domain);
	pd = container_of(genpd, struct rockchip_pm_domain, genpd);

	return pd->info->active_wakeup;
}

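/*
 * Each child node of the power controller describes one domain. An
 * illustrative (not authoritative) rk3399 fragment matching what this
 * function parses - the node name, clock and QoS phandles are assumptions:
 *
 *	power: power-controller {
 *		compatible = "rockchip,rk3399-power-controller";
 *		#power-domain-cells = <1>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		pd_gpu {
 *			reg = <RK3399_PD_GPU>;
 *			clocks = <&cru ACLK_GPU>;
 *			pm_qos = <&qos_gpu>;
 *		};
 *	};
 *
 * A consumer device then references the domain with
 * "power-domains = <&power RK3399_PD_GPU>;".
 */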
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
				      struct device_node *node)
{
	const struct rockchip_domain_info *pd_info;
	struct rockchip_pm_domain *pd;
	struct device_node *qos_node;
	struct clk *clk;
	int clk_cnt;
	int i, j;
	u32 id;
	int error;

	error = of_property_read_u32(node, "reg", &id);
	if (error) {
		dev_err(pmu->dev,
			"%s: failed to retrieve domain id (reg): %d\n",
			node->name, error);
		return -EINVAL;
	}

	if (id >= pmu->info->num_domains) {
		dev_err(pmu->dev, "%s: invalid domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	pd_info = &pmu->info->domain_info[id];
	if (!pd_info) {
		dev_err(pmu->dev, "%s: undefined domain id %d\n",
			node->name, id);
		return -EINVAL;
	}

	clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
	pd = devm_kzalloc(pmu->dev,
			  sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
			  GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->info = pd_info;
	pd->pmu = pmu;

	for (i = 0; i < clk_cnt; i++) {
		clk = of_clk_get(node, i);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			dev_err(pmu->dev,
				"%s: failed to get clk at index %d: %d\n",
				node->name, i, error);
			goto err_out;
		}

		error = clk_prepare(clk);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to prepare clk %pC (index %d): %d\n",
				node->name, clk, i, error);
			clk_put(clk);
			goto err_out;
		}

		pd->clks[pd->num_clks++] = clk;

		dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
			clk, node->name);
	}

	pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
						 NULL);

	if (pd->num_qos > 0) {
		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
					      sizeof(*pd->qos_regmap),
					      GFP_KERNEL);
		if (!pd->qos_regmap) {
			error = -ENOMEM;
			goto err_out;
		}

		for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
							    pd->num_qos,
							    sizeof(u32),
							    GFP_KERNEL);
			if (!pd->qos_save_regs[j]) {
				error = -ENOMEM;
				goto err_out;
			}
		}

		for (j = 0; j < pd->num_qos; j++) {
			qos_node = of_parse_phandle(node, "pm_qos", j);
			if (!qos_node) {
				error = -ENODEV;
				goto err_out;
			}
			pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
			if (IS_ERR(pd->qos_regmap[j])) {
				error = -ENODEV;
				of_node_put(qos_node);
				goto err_out;
			}
			of_node_put(qos_node);
		}
	}

	error = rockchip_pd_power(pd, true);
	if (error) {
		dev_err(pmu->dev,
			"failed to power on domain '%s': %d\n",
			node->name, error);
		goto err_out;
	}

	pd->genpd.name = node->name;
	pd->genpd.power_off = rockchip_pd_power_off;
	pd->genpd.power_on = rockchip_pd_power_on;
	pd->genpd.attach_dev = rockchip_pd_attach_dev;
	pd->genpd.detach_dev = rockchip_pd_detach_dev;
	pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
	pd->genpd.flags = GENPD_FLAG_PM_CLK;
	pm_genpd_init(&pd->genpd, NULL, false);

	pmu->genpd_data.domains[id] = &pd->genpd;
	return 0;

err_out:
	while (--i >= 0) {
		clk_unprepare(pd->clks[i]);
		clk_put(pd->clks[i]);
	}
	return error;
}

static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
	int i;

	for (i = 0; i < pd->num_clks; i++) {
		clk_unprepare(pd->clks[i]);
		clk_put(pd->clks[i]);
	}

	/* protect the zeroing of pd->num_clks */
	mutex_lock(&pd->pmu->mutex);
	pd->num_clks = 0;
	mutex_unlock(&pd->pmu->mutex);

	/* devm will free our memory */
}

static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
{
	struct generic_pm_domain *genpd;
	struct rockchip_pm_domain *pd;
	int i;

	for (i = 0; i < pmu->genpd_data.num_domains; i++) {
		genpd = pmu->genpd_data.domains[i];
		if (genpd) {
			pd = to_rockchip_pd(genpd);
			rockchip_pm_remove_one_domain(pd);
		}
	}

	/* devm will free our memory */
}

static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
				      u32 domain_reg_offset,
				      unsigned int count)
{
	/* First configure domain power down transition count ... */
	regmap_write(pmu->regmap, domain_reg_offset, count);
	/* ... and then power up count. */
	regmap_write(pmu->regmap, domain_reg_offset + 4, count);
}

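/*
 * Walk the child nodes of an already-registered domain node, register each
 * child as a domain of its own and link it to its parent with
 * pm_genpd_add_subdomain(), recursing so that nested domain hierarchies in
 * the device tree are handled.
 */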
static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
				     struct device_node *parent)
{
	struct device_node *np;
	struct generic_pm_domain *child_domain, *parent_domain;
	int error;

	for_each_child_of_node(parent, np) {
		u32 idx;

		error = of_property_read_u32(parent, "reg", &idx);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to retrieve domain id (reg): %d\n",
				parent->name, error);
			goto err_out;
		}
		parent_domain = pmu->genpd_data.domains[idx];

		error = rockchip_pm_add_one_domain(pmu, np);
		if (error) {
			dev_err(pmu->dev, "failed to handle node %s: %d\n",
				np->name, error);
			goto err_out;
		}

		error = of_property_read_u32(np, "reg", &idx);
		if (error) {
			dev_err(pmu->dev,
				"%s: failed to retrieve domain id (reg): %d\n",
				np->name, error);
			goto err_out;
		}
		child_domain = pmu->genpd_data.domains[idx];

		error = pm_genpd_add_subdomain(parent_domain, child_domain);
		if (error) {
			dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
				parent_domain->name, child_domain->name, error);
			goto err_out;
		} else {
			dev_dbg(pmu->dev, "%s add subdomain: %s\n",
				parent_domain->name, child_domain->name);
		}

		rockchip_pm_add_subdomain(pmu, np);
	}

	return 0;

err_out:
	of_node_put(np);
	return error;
}

static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *node;
	struct device *parent;
	struct rockchip_pmu *pmu;
	const struct of_device_id *match;
	const struct rockchip_pmu_info *pmu_info;
	int error;

	if (!np) {
		dev_err(dev, "device tree node not found\n");
		return -ENODEV;
	}

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match || !match->data) {
		dev_err(dev, "missing pmu data\n");
		return -EINVAL;
	}

	pmu_info = match->data;

	pmu = devm_kzalloc(dev,
			   sizeof(*pmu) +
				pmu_info->num_domains * sizeof(pmu->domains[0]),
			   GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	pmu->dev = &pdev->dev;
	mutex_init(&pmu->mutex);

	pmu->info = pmu_info;

	pmu->genpd_data.domains = pmu->domains;
	pmu->genpd_data.num_domains = pmu_info->num_domains;

	parent = dev->parent;
	if (!parent) {
		dev_err(dev, "no parent for syscon devices\n");
		return -ENODEV;
	}

	pmu->regmap = syscon_node_to_regmap(parent->of_node);
	if (IS_ERR(pmu->regmap)) {
		dev_err(dev, "no regmap available\n");
		return PTR_ERR(pmu->regmap);
	}

	/*
	 * Configure power up and down transition delays for CORE
	 * and GPU domains.
	 */
	rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
				  pmu_info->core_power_transition_time);
	rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
				  pmu_info->gpu_power_transition_time);

	error = -ENODEV;

	for_each_available_child_of_node(np, node) {
		error = rockchip_pm_add_one_domain(pmu, node);
		if (error) {
			dev_err(dev, "failed to handle node %s: %d\n",
				node->name, error);
			of_node_put(node);
			goto err_out;
		}

		error = rockchip_pm_add_subdomain(pmu, node);
		if (error < 0) {
			dev_err(dev, "failed to handle subdomain node %s: %d\n",
				node->name, error);
			of_node_put(node);
			goto err_out;
		}
	}

	if (error) {
		dev_dbg(dev, "no power domains defined\n");
		goto err_out;
	}

	of_genpd_add_provider_onecell(np, &pmu->genpd_data);

	return 0;

err_out:
	rockchip_pm_domain_cleanup(pmu);
	return error;
}

static const struct rockchip_domain_info rk3288_pm_domains[] = {
	[RK3288_PD_VIO]		= DOMAIN_RK3288(7, 7, 4, false),
	[RK3288_PD_HEVC]	= DOMAIN_RK3288(14, 10, 9, false),
	[RK3288_PD_VIDEO]	= DOMAIN_RK3288(8, 8, 3, false),
	[RK3288_PD_GPU]		= DOMAIN_RK3288(9, 9, 2, false),
};

static const struct rockchip_domain_info rk3368_pm_domains[] = {
	[RK3368_PD_PERI]	= DOMAIN_RK3368(13, 12, 6, true),
	[RK3368_PD_VIO]		= DOMAIN_RK3368(15, 14, 8, false),
	[RK3368_PD_VIDEO]	= DOMAIN_RK3368(14, 13, 7, false),
	[RK3368_PD_GPU_0]	= DOMAIN_RK3368(16, 15, 2, false),
	[RK3368_PD_GPU_1]	= DOMAIN_RK3368(17, 16, 2, false),
};

static const struct rockchip_domain_info rk3399_pm_domains[] = {
	[RK3399_PD_TCPD0]	= DOMAIN_RK3399(8, 8, -1, false),
	[RK3399_PD_TCPD1]	= DOMAIN_RK3399(9, 9, -1, false),
	[RK3399_PD_CCI]		= DOMAIN_RK3399(10, 10, -1, true),
	[RK3399_PD_CCI0]	= DOMAIN_RK3399(-1, -1, 15, true),
	[RK3399_PD_CCI1]	= DOMAIN_RK3399(-1, -1, 16, true),
	[RK3399_PD_PERILP]	= DOMAIN_RK3399(11, 11, 1, true),
	[RK3399_PD_PERIHP]	= DOMAIN_RK3399(12, 12, 2, true),
	[RK3399_PD_CENTER]	= DOMAIN_RK3399(13, 13, 14, true),
	[RK3399_PD_VIO]		= DOMAIN_RK3399(14, 14, 17, false),
	[RK3399_PD_GPU]		= DOMAIN_RK3399(15, 15, 0, false),
	[RK3399_PD_VCODEC]	= DOMAIN_RK3399(16, 16, 3, false),
	[RK3399_PD_VDU]		= DOMAIN_RK3399(17, 17, 4, false),
	[RK3399_PD_RGA]		= DOMAIN_RK3399(18, 18, 5, false),
	[RK3399_PD_IEP]		= DOMAIN_RK3399(19, 19, 6, false),
	[RK3399_PD_VO]		= DOMAIN_RK3399(20, 20, -1, false),
	[RK3399_PD_VOPB]	= DOMAIN_RK3399(-1, -1, 7, false),
	[RK3399_PD_VOPL]	= DOMAIN_RK3399(-1, -1, 8, false),
	[RK3399_PD_ISP0]	= DOMAIN_RK3399(22, 22, 9, false),
	[RK3399_PD_ISP1]	= DOMAIN_RK3399(23, 23, 10, false),
	[RK3399_PD_HDCP]	= DOMAIN_RK3399(24, 24, 11, false),
	[RK3399_PD_GMAC]	= DOMAIN_RK3399(25, 25, 23, true),
	[RK3399_PD_EMMC]	= DOMAIN_RK3399(26, 26, 24, true),
	[RK3399_PD_USB3]	= DOMAIN_RK3399(27, 27, 12, true),
	[RK3399_PD_EDP]		= DOMAIN_RK3399(28, 28, 22, false),
	[RK3399_PD_GIC]		= DOMAIN_RK3399(29, 29, 27, true),
	[RK3399_PD_SD]		= DOMAIN_RK3399(30, 30, 28, true),
	[RK3399_PD_SDIOAUDIO]	= DOMAIN_RK3399(31, 31, 29, true),
};

static const struct rockchip_pmu_info rk3288_pmu = {
	.pwr_offset = 0x08,
	.status_offset = 0x0c,
	.req_offset = 0x10,
	.idle_offset = 0x14,
	.ack_offset = 0x14,

	.core_pwrcnt_offset = 0x34,
	.gpu_pwrcnt_offset = 0x3c,

	.core_power_transition_time = 24, /* 1us */
	.gpu_power_transition_time = 24, /* 1us */

	.num_domains = ARRAY_SIZE(rk3288_pm_domains),
	.domain_info = rk3288_pm_domains,
};

static const struct rockchip_pmu_info rk3368_pmu = {
	.pwr_offset = 0x0c,
	.status_offset = 0x10,
	.req_offset = 0x3c,
	.idle_offset = 0x40,
	.ack_offset = 0x40,

	.core_pwrcnt_offset = 0x48,
	.gpu_pwrcnt_offset = 0x50,

	.core_power_transition_time = 24,
	.gpu_power_transition_time = 24,

	.num_domains = ARRAY_SIZE(rk3368_pm_domains),
	.domain_info = rk3368_pm_domains,
};

static const struct rockchip_pmu_info rk3399_pmu = {
	.pwr_offset = 0x14,
	.status_offset = 0x18,
	.req_offset = 0x60,
	.idle_offset = 0x64,
	.ack_offset = 0x68,

	.core_pwrcnt_offset = 0x9c,
	.gpu_pwrcnt_offset = 0xa4,

	.core_power_transition_time = 24,
	.gpu_power_transition_time = 24,

	.num_domains = ARRAY_SIZE(rk3399_pm_domains),
	.domain_info = rk3399_pm_domains,
};

static const struct of_device_id rockchip_pm_domain_dt_match[] = {
	{
		.compatible = "rockchip,rk3288-power-controller",
		.data = (void *)&rk3288_pmu,
	},
	{
		.compatible = "rockchip,rk3368-power-controller",
		.data = (void *)&rk3368_pmu,
	},
	{
		.compatible = "rockchip,rk3399-power-controller",
		.data = (void *)&rk3399_pmu,
	},
	{ /* sentinel */ },
};

static struct platform_driver rockchip_pm_domain_driver = {
	.probe = rockchip_pm_domain_probe,
	.driver = {
		.name   = "rockchip-pm-domain",
		.of_match_table = rockchip_pm_domain_dt_match,
		/*
		 * We can't forcibly eject devices from the power domain,
		 * so we can't really remove power domains once they
		 * were added.
		 */
		.suppress_bind_attrs = true,
	},
};

static int __init rockchip_pm_domain_drv_register(void)
{
	return platform_driver_register(&rockchip_pm_domain_driver);
}
postcore_initcall(rockchip_pm_domain_drv_register);