treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / soc / qcom / rpmhpd.c
blob5741ec3fa814c227a76cb2bd3ff407c7ede31a9b
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
4 #include <linux/err.h>
5 #include <linux/init.h>
6 #include <linux/kernel.h>
7 #include <linux/mutex.h>
8 #include <linux/pm_domain.h>
9 #include <linux/slab.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <soc/qcom/cmd-db.h>
15 #include <soc/qcom/rpmh.h>
16 #include <dt-bindings/power/qcom-rpmpd.h>
/* Resolve the enclosing struct rpmhpd from its embedded generic_pm_domain. */
#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)

/* Maximum number of corner (hlvl) entries an ARC resource publishes in cmd-db. */
#define RPMH_ARC_MAX_LEVELS	16
22 /**
23 * struct rpmhpd - top level RPMh power domain resource data structure
24 * @dev: rpmh power domain controller device
25 * @pd: generic_pm_domain corrresponding to the power domain
26 * @peer: A peer power domain in case Active only Voting is
27 * supported
28 * @active_only: True if it represents an Active only peer
29 * @level: An array of level (vlvl) to corner (hlvl) mappings
30 * derived from cmd-db
31 * @level_count: Number of levels supported by the power domain. max
32 * being 16 (0 - 15)
33 * @enabled: true if the power domain is enabled
34 * @res_name: Resource name used for cmd-db lookup
35 * @addr: Resource address as looped up using resource name from
36 * cmd-db
38 struct rpmhpd {
39 struct device *dev;
40 struct generic_pm_domain pd;
41 struct generic_pm_domain *parent;
42 struct rpmhpd *peer;
43 const bool active_only;
44 unsigned int corner;
45 unsigned int active_corner;
46 u32 level[RPMH_ARC_MAX_LEVELS];
47 size_t level_count;
48 bool enabled;
49 const char *res_name;
50 u32 addr;
/* Per-SoC description: the set of power domains exported by the provider. */
struct rpmhpd_desc {
	struct rpmhpd **rpmhpds;
	size_t num_pds;
};
58 static DEFINE_MUTEX(rpmhpd_lock);
60 /* SDM845 RPMH powerdomains */
62 static struct rpmhpd sdm845_ebi = {
63 .pd = { .name = "ebi", },
64 .res_name = "ebi.lvl",
67 static struct rpmhpd sdm845_lmx = {
68 .pd = { .name = "lmx", },
69 .res_name = "lmx.lvl",
72 static struct rpmhpd sdm845_lcx = {
73 .pd = { .name = "lcx", },
74 .res_name = "lcx.lvl",
77 static struct rpmhpd sdm845_gfx = {
78 .pd = { .name = "gfx", },
79 .res_name = "gfx.lvl",
82 static struct rpmhpd sdm845_mss = {
83 .pd = { .name = "mss", },
84 .res_name = "mss.lvl",
87 static struct rpmhpd sdm845_mx_ao;
88 static struct rpmhpd sdm845_mx = {
89 .pd = { .name = "mx", },
90 .peer = &sdm845_mx_ao,
91 .res_name = "mx.lvl",
94 static struct rpmhpd sdm845_mx_ao = {
95 .pd = { .name = "mx_ao", },
96 .peer = &sdm845_mx,
97 .res_name = "mx.lvl",
100 static struct rpmhpd sdm845_cx_ao;
101 static struct rpmhpd sdm845_cx = {
102 .pd = { .name = "cx", },
103 .peer = &sdm845_cx_ao,
104 .parent = &sdm845_mx.pd,
105 .res_name = "cx.lvl",
108 static struct rpmhpd sdm845_cx_ao = {
109 .pd = { .name = "cx_ao", },
110 .peer = &sdm845_cx,
111 .parent = &sdm845_mx_ao.pd,
112 .res_name = "cx.lvl",
115 static struct rpmhpd *sdm845_rpmhpds[] = {
116 [SDM845_EBI] = &sdm845_ebi,
117 [SDM845_MX] = &sdm845_mx,
118 [SDM845_MX_AO] = &sdm845_mx_ao,
119 [SDM845_CX] = &sdm845_cx,
120 [SDM845_CX_AO] = &sdm845_cx_ao,
121 [SDM845_LMX] = &sdm845_lmx,
122 [SDM845_LCX] = &sdm845_lcx,
123 [SDM845_GFX] = &sdm845_gfx,
124 [SDM845_MSS] = &sdm845_mss,
127 static const struct rpmhpd_desc sdm845_desc = {
128 .rpmhpds = sdm845_rpmhpds,
129 .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
132 static const struct of_device_id rpmhpd_match_table[] = {
133 { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
137 static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
138 unsigned int corner, bool sync)
140 struct tcs_cmd cmd = {
141 .addr = pd->addr,
142 .data = corner,
146 * Wait for an ack only when we are increasing the
147 * perf state of the power domain
149 if (sync)
150 return rpmh_write(pd->dev, state, &cmd, 1);
151 else
152 return rpmh_write_async(pd->dev, state, &cmd, 1);
155 static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
156 unsigned int *active, unsigned int *sleep)
158 *active = corner;
160 if (pd->active_only)
161 *sleep = 0;
162 else
163 *sleep = *active;
167 * This function is used to aggregate the votes across the active only
168 * resources and its peers. The aggregated votes are sent to RPMh as
169 * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
170 * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
171 * on system sleep).
172 * We send ACTIVE_ONLY votes for resources without any peers. For others,
173 * which have an active only peer, all 3 votes are sent.
175 static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
177 int ret;
178 struct rpmhpd *peer = pd->peer;
179 unsigned int active_corner, sleep_corner;
180 unsigned int this_active_corner = 0, this_sleep_corner = 0;
181 unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
183 to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
185 if (peer && peer->enabled)
186 to_active_sleep(peer, peer->corner, &peer_active_corner,
187 &peer_sleep_corner);
189 active_corner = max(this_active_corner, peer_active_corner);
191 ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
192 active_corner > pd->active_corner);
193 if (ret)
194 return ret;
196 pd->active_corner = active_corner;
198 if (peer) {
199 peer->active_corner = active_corner;
201 ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
202 active_corner, false);
203 if (ret)
204 return ret;
206 sleep_corner = max(this_sleep_corner, peer_sleep_corner);
208 return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
209 false);
212 return ret;
215 static int rpmhpd_power_on(struct generic_pm_domain *domain)
217 struct rpmhpd *pd = domain_to_rpmhpd(domain);
218 int ret = 0;
220 mutex_lock(&rpmhpd_lock);
222 if (pd->corner)
223 ret = rpmhpd_aggregate_corner(pd, pd->corner);
225 if (!ret)
226 pd->enabled = true;
228 mutex_unlock(&rpmhpd_lock);
230 return ret;
233 static int rpmhpd_power_off(struct generic_pm_domain *domain)
235 struct rpmhpd *pd = domain_to_rpmhpd(domain);
236 int ret = 0;
238 mutex_lock(&rpmhpd_lock);
240 ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
242 if (!ret)
243 pd->enabled = false;
245 mutex_unlock(&rpmhpd_lock);
247 return ret;
250 static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
251 unsigned int level)
253 struct rpmhpd *pd = domain_to_rpmhpd(domain);
254 int ret = 0, i;
256 mutex_lock(&rpmhpd_lock);
258 for (i = 0; i < pd->level_count; i++)
259 if (level <= pd->level[i])
260 break;
263 * If the level requested is more than that supported by the
264 * max corner, just set it to max anyway.
266 if (i == pd->level_count)
267 i--;
269 if (pd->enabled) {
270 ret = rpmhpd_aggregate_corner(pd, i);
271 if (ret)
272 goto out;
275 pd->corner = i;
276 out:
277 mutex_unlock(&rpmhpd_lock);
279 return ret;
/* genpd callback: an OPP table entry's "level" is the performance state. */
static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}
288 static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
290 int i;
291 const u16 *buf;
293 buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
294 if (IS_ERR(buf))
295 return PTR_ERR(buf);
297 /* 2 bytes used for each command DB aux data entry */
298 rpmhpd->level_count >>= 1;
300 if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
301 return -EINVAL;
303 for (i = 0; i < rpmhpd->level_count; i++) {
304 rpmhpd->level[i] = buf[i];
307 * The AUX data may be zero padded. These 0 valued entries at
308 * the end of the map must be ignored.
310 if (i > 0 && rpmhpd->level[i] == 0) {
311 rpmhpd->level_count = i;
312 break;
314 pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
315 rpmhpd->level[i]);
318 return 0;
321 static int rpmhpd_probe(struct platform_device *pdev)
323 int i, ret;
324 size_t num_pds;
325 struct device *dev = &pdev->dev;
326 struct genpd_onecell_data *data;
327 struct rpmhpd **rpmhpds;
328 const struct rpmhpd_desc *desc;
330 desc = of_device_get_match_data(dev);
331 if (!desc)
332 return -EINVAL;
334 rpmhpds = desc->rpmhpds;
335 num_pds = desc->num_pds;
337 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
338 if (!data)
339 return -ENOMEM;
341 data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
342 GFP_KERNEL);
343 if (!data->domains)
344 return -ENOMEM;
346 data->num_domains = num_pds;
348 for (i = 0; i < num_pds; i++) {
349 if (!rpmhpds[i]) {
350 dev_warn(dev, "rpmhpds[%d] is empty\n", i);
351 continue;
354 rpmhpds[i]->dev = dev;
355 rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
356 if (!rpmhpds[i]->addr) {
357 dev_err(dev, "Could not find RPMh address for resource %s\n",
358 rpmhpds[i]->res_name);
359 return -ENODEV;
362 ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
363 if (ret != CMD_DB_HW_ARC) {
364 dev_err(dev, "RPMh slave ID mismatch\n");
365 return -EINVAL;
368 ret = rpmhpd_update_level_mapping(rpmhpds[i]);
369 if (ret)
370 return ret;
372 rpmhpds[i]->pd.power_off = rpmhpd_power_off;
373 rpmhpds[i]->pd.power_on = rpmhpd_power_on;
374 rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
375 rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
376 pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
378 data->domains[i] = &rpmhpds[i]->pd;
381 /* Add subdomains */
382 for (i = 0; i < num_pds; i++) {
383 if (!rpmhpds[i])
384 continue;
385 if (rpmhpds[i]->parent)
386 pm_genpd_add_subdomain(rpmhpds[i]->parent,
387 &rpmhpds[i]->pd);
390 return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
393 static struct platform_driver rpmhpd_driver = {
394 .driver = {
395 .name = "qcom-rpmhpd",
396 .of_match_table = rpmhpd_match_table,
397 .suppress_bind_attrs = true,
399 .probe = rpmhpd_probe,
402 static int __init rpmhpd_init(void)
404 return platform_driver_register(&rpmhpd_driver);
406 core_initcall(rpmhpd_init);