drivers/soc/qcom/rpmh.c (Linux 4.19.133)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = dev,				\
		.needs_free = false,			\
	}

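/*
 * Illustrative use of the on-stack request macro above, mirroring its use
 * in rpmh_write() below (the completion may also be NULL for
 * fire-and-forget requests, as in send_single()):
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 */
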
#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

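/*
 * rpmh_tx_done() is the transmit-done callback for this client layer: the
 * rpmh-rsc driver invokes it when a TCS request completes, and
 * __rpmh_write() below spoofs it for fire-and-forget sleep/wake writes.
 */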
void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &ctrlr->cache);

existing:
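	/*
	 * Record the vote for the requested state; UINT_MAX marks "no vote
	 * yet". An active-only vote also refreshes the cached wake vote once
	 * a sleep vote exists, so the active value is restored on wakeup.
	 */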
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

	ctrlr->dirty = true;
unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
					       &rpm_msg->msg);
		/* Clean up our call by spoofing tx_done */
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands, the order of commands is maintained
 * and will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

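/*
 * Example (illustrative; the resource address is hypothetical, real
 * addresses come from cmd-db): a fire-and-forget active vote. The
 * heap-allocated request is freed by rpmh_tx_done() on completion.
 *
 *	struct tcs_cmd cmd = { .addr = 0x50000, .data = 1 };
 *	int ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 */
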
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);

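/*
 * Example (illustrative; the resource address and data are hypothetical):
 * a blocking active-only vote, returning 0 once the controller acks the
 * request, or -ETIMEDOUT after RPMH_TIMEOUT_MS:
 *
 *	struct tcs_cmd cmd = { .addr = 0x50000, .data = 0x4 };
 *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 */
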
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	unsigned long flags;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return ret;
}

static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

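	/*
	 * One allocation backs the batch_cache_req header, its trailing
	 * array of count rpmh_request messages, and (used only on the
	 * ACTIVE_ONLY path) count completions placed after the messages.
	 */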
	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);

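/*
 * Example (illustrative; addresses and data values are hypothetical): send
 * two batches of one command each as a single blocking call. The count
 * array must be zero-terminated:
 *
 *	struct tcs_cmd cmds[2] = {
 *		{ .addr = 0x50000, .data = 1 },
 *		{ .addr = 0x50004, .data = 2 },
 *	};
 *	u32 n[3] = { 1, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */
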
static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered sleep and wake sets to TCS
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to a RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
	struct cache_req *p;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		return 0;
	}

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		return ret;

	/*
	 * Nobody else should be calling this function other than system PM,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
				  p->addr, p->wake_val);
		if (ret)
			return ret;
	}

	ctrlr->dirty = false;

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and active values in the TCS blocks.
 */
int rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	invalidate_batch(ctrlr);
	ctrlr->dirty = true;

	do {
		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
	} while (ret == -EAGAIN);

	return ret;
}
EXPORT_SYMBOL(rpmh_invalidate);