// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = dev,				\
		.needs_free = false,			\
	}
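
/*
 * A minimal sketch of how the macro above is used in practice: callers such
 * as rpmh_write() below declare a fully-initialized request on the stack,
 * e.g.:
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 */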

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

	ctrlr->dirty = true;
unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
		const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
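
/*
 * A minimal usage sketch (hypothetical client driver; the resource address
 * and data value below are illustrative, not real RPMh resources):
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30010,	// hypothetical resource address
 *		.data = 0x1,		// hypothetical vote value
 *	};
 *
 *	// Fire-and-forget: returns once the request is queued to the TCS.
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 */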

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
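
/*
 * A minimal blocking-write sketch (hypothetical client; addresses and data
 * are illustrative). rpmh_write() sleeps until the hardware acks the
 * request or the 10 s timeout expires:
 *
 *	struct tcs_cmd cmds[2] = {
 *		{ .addr = 0x30010, .data = 0x1 },	// hypothetical
 *		{ .addr = 0x30014, .data = 0x2 },	// hypothetical
 *	};
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, cmds, ARRAY_SIZE(cmds));
 */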

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	unsigned long flags;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return ret;
}

static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of count of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
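
/*
 * A minimal batch sketch (hypothetical client; addresses/data illustrative).
 * Each entry of n[] gives the command count of one batch, and the array is
 * 0-terminated:
 *
 *	struct tcs_cmd cmds[3] = {
 *		{ .addr = 0x30010, .data = 0x1 },	// batch 0, cmd 0
 *		{ .addr = 0x30014, .data = 0x2 },	// batch 0, cmd 1
 *		{ .addr = 0x30020, .data = 0x3 },	// batch 1, cmd 0
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */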

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to a RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
	struct cache_req *p;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		return 0;
	}

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		return ret;

	/*
	 * Nobody else should be calling this function other than system PM,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
				  p->addr, p->wake_val);
		if (ret)
			return ret;
	}

	ctrlr->dirty = false;

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);
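
/*
 * A minimal sketch of the intended call site (hypothetical; the real caller
 * is the platform's system-PM / CPU-idle path, running on the last CPU
 * going down):
 *
 *	if (last_cpu_powering_down())	// hypothetical helper
 *		ret = rpmh_flush(dev);	// push sleep/wake votes to the TCS
 */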

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and active values in the TCS blocks.
 */
int rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	invalidate_batch(ctrlr);

	do {
		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
	} while (ret == -EAGAIN);

	return ret;
}
EXPORT_SYMBOL(rpmh_invalidate);
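
/*
 * A minimal sketch of a client rebuilding its votes (hypothetical flow;
 * sleep_cmd/wake_cmd are illustrative struct tcs_cmd values):
 *
 *	rpmh_invalidate(dev);			// drop stale sleep/wake sets
 *	rpmh_write(dev, RPMH_SLEEP_STATE, &sleep_cmd, 1);
 *	rpmh_write(dev, RPMH_WAKE_ONLY_STATE, &wake_cmd, 1);
 *	// rpmh_flush() later writes these to the TCS on system sleep
 */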