1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */
6 #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
8 #include <linux/atomic.h>
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
12 #include <linux/kernel.h>
13 #include <linux/list.h>
15 #include <linux/of_irq.h>
16 #include <linux/of_platform.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
21 #include <soc/qcom/cmd-db.h>
22 #include <soc/qcom/tcs.h>
23 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
25 #include "rpmh-internal.h"
27 #define CREATE_TRACE_POINTS
28 #include "trace-rpmh.h"
30 #define RSC_DRV_TCS_OFFSET 672
31 #define RSC_DRV_CMD_OFFSET 20
33 /* DRV Configuration Information Register */
34 #define DRV_PRNT_CHLD_CONFIG 0x0C
35 #define DRV_NUM_TCS_MASK 0x3F
36 #define DRV_NUM_TCS_SHIFT 6
37 #define DRV_NCPT_MASK 0x1F
38 #define DRV_NCPT_SHIFT 27
40 /* Register offsets */
41 #define RSC_DRV_IRQ_ENABLE 0x00
42 #define RSC_DRV_IRQ_STATUS 0x04
43 #define RSC_DRV_IRQ_CLEAR 0x08
44 #define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
45 #define RSC_DRV_CONTROL 0x14
46 #define RSC_DRV_STATUS 0x18
47 #define RSC_DRV_CMD_ENABLE 0x1C
48 #define RSC_DRV_CMD_MSGID 0x30
49 #define RSC_DRV_CMD_ADDR 0x34
50 #define RSC_DRV_CMD_DATA 0x38
51 #define RSC_DRV_CMD_STATUS 0x3C
52 #define RSC_DRV_CMD_RESP_DATA 0x40
54 #define TCS_AMC_MODE_ENABLE BIT(16)
55 #define TCS_AMC_MODE_TRIGGER BIT(24)
57 /* TCS CMD register bit mask */
58 #define CMD_MSGID_LEN 8
59 #define CMD_MSGID_RESP_REQ BIT(8)
60 #define CMD_MSGID_WRITE BIT(16)
61 #define CMD_STATUS_ISSUED BIT(8)
62 #define CMD_STATUS_COMPL BIT(16)
64 static u32
read_tcs_reg(struct rsc_drv
*drv
, int reg
, int tcs_id
, int cmd_id
)
66 return readl_relaxed(drv
->tcs_base
+ reg
+ RSC_DRV_TCS_OFFSET
* tcs_id
+
67 RSC_DRV_CMD_OFFSET
* cmd_id
);
70 static void write_tcs_cmd(struct rsc_drv
*drv
, int reg
, int tcs_id
, int cmd_id
,
73 writel_relaxed(data
, drv
->tcs_base
+ reg
+ RSC_DRV_TCS_OFFSET
* tcs_id
+
74 RSC_DRV_CMD_OFFSET
* cmd_id
);
77 static void write_tcs_reg(struct rsc_drv
*drv
, int reg
, int tcs_id
, u32 data
)
79 writel_relaxed(data
, drv
->tcs_base
+ reg
+ RSC_DRV_TCS_OFFSET
* tcs_id
);
82 static void write_tcs_reg_sync(struct rsc_drv
*drv
, int reg
, int tcs_id
,
85 writel(data
, drv
->tcs_base
+ reg
+ RSC_DRV_TCS_OFFSET
* tcs_id
);
87 if (data
== readl(drv
->tcs_base
+ reg
+
88 RSC_DRV_TCS_OFFSET
* tcs_id
))
94 static bool tcs_is_free(struct rsc_drv
*drv
, int tcs_id
)
96 return !test_bit(tcs_id
, drv
->tcs_in_use
) &&
97 read_tcs_reg(drv
, RSC_DRV_STATUS
, tcs_id
, 0);
100 static struct tcs_group
*get_tcs_of_type(struct rsc_drv
*drv
, int type
)
102 return &drv
->tcs
[type
];
105 static int tcs_invalidate(struct rsc_drv
*drv
, int type
)
108 struct tcs_group
*tcs
;
110 tcs
= get_tcs_of_type(drv
, type
);
112 spin_lock(&tcs
->lock
);
113 if (bitmap_empty(tcs
->slots
, MAX_TCS_SLOTS
)) {
114 spin_unlock(&tcs
->lock
);
118 for (m
= tcs
->offset
; m
< tcs
->offset
+ tcs
->num_tcs
; m
++) {
119 if (!tcs_is_free(drv
, m
)) {
120 spin_unlock(&tcs
->lock
);
123 write_tcs_reg_sync(drv
, RSC_DRV_CMD_ENABLE
, m
, 0);
124 write_tcs_reg_sync(drv
, RSC_DRV_CMD_WAIT_FOR_CMPL
, m
, 0);
126 bitmap_zero(tcs
->slots
, MAX_TCS_SLOTS
);
127 spin_unlock(&tcs
->lock
);
133 * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
135 * @drv: the RSC controller
137 int rpmh_rsc_invalidate(struct rsc_drv
*drv
)
141 ret
= tcs_invalidate(drv
, SLEEP_TCS
);
143 ret
= tcs_invalidate(drv
, WAKE_TCS
);
148 static struct tcs_group
*get_tcs_for_msg(struct rsc_drv
*drv
,
149 const struct tcs_request
*msg
)
152 struct tcs_group
*tcs
;
154 switch (msg
->state
) {
155 case RPMH_ACTIVE_ONLY_STATE
:
158 case RPMH_WAKE_ONLY_STATE
:
161 case RPMH_SLEEP_STATE
:
165 return ERR_PTR(-EINVAL
);
169 * If we are making an active request on a RSC that does not have a
170 * dedicated TCS for active state use, then re-purpose a wake TCS to
172 * NOTE: The driver must be aware that this RSC does not have a
173 * dedicated AMC, and therefore would invalidate the sleep and wake
174 * TCSes before making an active state request.
176 tcs
= get_tcs_of_type(drv
, type
);
177 if (msg
->state
== RPMH_ACTIVE_ONLY_STATE
&& !tcs
->num_tcs
) {
178 tcs
= get_tcs_of_type(drv
, WAKE_TCS
);
180 ret
= rpmh_rsc_invalidate(drv
);
189 static const struct tcs_request
*get_req_from_tcs(struct rsc_drv
*drv
,
192 struct tcs_group
*tcs
;
195 for (i
= 0; i
< TCS_TYPE_NR
; i
++) {
197 if (tcs
->mask
& BIT(tcs_id
))
198 return tcs
->req
[tcs_id
- tcs
->offset
];
205 * tcs_tx_done: TX Done interrupt handler
207 static irqreturn_t
tcs_tx_done(int irq
, void *p
)
209 struct rsc_drv
*drv
= p
;
211 unsigned long irq_status
;
212 const struct tcs_request
*req
;
215 irq_status
= read_tcs_reg(drv
, RSC_DRV_IRQ_STATUS
, 0, 0);
217 for_each_set_bit(i
, &irq_status
, BITS_PER_LONG
) {
218 req
= get_req_from_tcs(drv
, i
);
225 for (j
= 0; j
< req
->num_cmds
; j
++) {
229 sts
= read_tcs_reg(drv
, RSC_DRV_CMD_STATUS
, i
, j
);
230 if (!(sts
& CMD_STATUS_ISSUED
) ||
231 ((req
->wait_for_compl
|| cmd
->wait
) &&
232 !(sts
& CMD_STATUS_COMPL
))) {
233 pr_err("Incomplete request: %s: addr=%#x data=%#x",
234 drv
->name
, cmd
->addr
, cmd
->data
);
239 trace_rpmh_tx_done(drv
, i
, req
, err
);
241 /* Reclaim the TCS */
242 write_tcs_reg(drv
, RSC_DRV_CMD_ENABLE
, i
, 0);
243 write_tcs_reg(drv
, RSC_DRV_CMD_WAIT_FOR_CMPL
, i
, 0);
244 write_tcs_reg(drv
, RSC_DRV_IRQ_CLEAR
, 0, BIT(i
));
245 spin_lock(&drv
->lock
);
246 clear_bit(i
, drv
->tcs_in_use
);
247 spin_unlock(&drv
->lock
);
249 rpmh_tx_done(req
, err
);
255 static void __tcs_buffer_write(struct rsc_drv
*drv
, int tcs_id
, int cmd_id
,
256 const struct tcs_request
*msg
)
258 u32 msgid
, cmd_msgid
;
264 cmd_msgid
= CMD_MSGID_LEN
;
265 cmd_msgid
|= msg
->wait_for_compl
? CMD_MSGID_RESP_REQ
: 0;
266 cmd_msgid
|= CMD_MSGID_WRITE
;
268 cmd_complete
= read_tcs_reg(drv
, RSC_DRV_CMD_WAIT_FOR_CMPL
, tcs_id
, 0);
270 for (i
= 0, j
= cmd_id
; i
< msg
->num_cmds
; i
++, j
++) {
272 cmd_enable
|= BIT(j
);
273 cmd_complete
|= cmd
->wait
<< j
;
275 msgid
|= cmd
->wait
? CMD_MSGID_RESP_REQ
: 0;
277 write_tcs_cmd(drv
, RSC_DRV_CMD_MSGID
, tcs_id
, j
, msgid
);
278 write_tcs_cmd(drv
, RSC_DRV_CMD_ADDR
, tcs_id
, j
, cmd
->addr
);
279 write_tcs_cmd(drv
, RSC_DRV_CMD_DATA
, tcs_id
, j
, cmd
->data
);
280 trace_rpmh_send_msg(drv
, tcs_id
, j
, msgid
, cmd
);
283 write_tcs_reg(drv
, RSC_DRV_CMD_WAIT_FOR_CMPL
, tcs_id
, cmd_complete
);
284 cmd_enable
|= read_tcs_reg(drv
, RSC_DRV_CMD_ENABLE
, tcs_id
, 0);
285 write_tcs_reg(drv
, RSC_DRV_CMD_ENABLE
, tcs_id
, cmd_enable
);
288 static void __tcs_trigger(struct rsc_drv
*drv
, int tcs_id
)
293 * HW req: Clear the DRV_CONTROL and enable TCS again
294 * While clearing ensure that the AMC mode trigger is cleared
295 * and then the mode enable is cleared.
297 enable
= read_tcs_reg(drv
, RSC_DRV_CONTROL
, tcs_id
, 0);
298 enable
&= ~TCS_AMC_MODE_TRIGGER
;
299 write_tcs_reg_sync(drv
, RSC_DRV_CONTROL
, tcs_id
, enable
);
300 enable
&= ~TCS_AMC_MODE_ENABLE
;
301 write_tcs_reg_sync(drv
, RSC_DRV_CONTROL
, tcs_id
, enable
);
303 /* Enable the AMC mode on the TCS and then trigger the TCS */
304 enable
= TCS_AMC_MODE_ENABLE
;
305 write_tcs_reg_sync(drv
, RSC_DRV_CONTROL
, tcs_id
, enable
);
306 enable
|= TCS_AMC_MODE_TRIGGER
;
307 write_tcs_reg_sync(drv
, RSC_DRV_CONTROL
, tcs_id
, enable
);
310 static int check_for_req_inflight(struct rsc_drv
*drv
, struct tcs_group
*tcs
,
311 const struct tcs_request
*msg
)
313 unsigned long curr_enabled
;
316 int tcs_id
= tcs
->offset
;
318 for (i
= 0; i
< tcs
->num_tcs
; i
++, tcs_id
++) {
319 if (tcs_is_free(drv
, tcs_id
))
322 curr_enabled
= read_tcs_reg(drv
, RSC_DRV_CMD_ENABLE
, tcs_id
, 0);
324 for_each_set_bit(j
, &curr_enabled
, MAX_CMDS_PER_TCS
) {
325 addr
= read_tcs_reg(drv
, RSC_DRV_CMD_ADDR
, tcs_id
, j
);
326 for (k
= 0; k
< msg
->num_cmds
; k
++) {
327 if (addr
== msg
->cmds
[k
].addr
)
336 static int find_free_tcs(struct tcs_group
*tcs
)
340 for (i
= 0; i
< tcs
->num_tcs
; i
++) {
341 if (tcs_is_free(tcs
->drv
, tcs
->offset
+ i
))
342 return tcs
->offset
+ i
;
348 static int tcs_write(struct rsc_drv
*drv
, const struct tcs_request
*msg
)
350 struct tcs_group
*tcs
;
355 tcs
= get_tcs_for_msg(drv
, msg
);
359 spin_lock_irqsave(&tcs
->lock
, flags
);
360 spin_lock(&drv
->lock
);
362 * The h/w does not like if we send a request to the same address,
363 * when one is already in-flight or being processed.
365 ret
= check_for_req_inflight(drv
, tcs
, msg
);
367 spin_unlock(&drv
->lock
);
371 tcs_id
= find_free_tcs(tcs
);
374 spin_unlock(&drv
->lock
);
378 tcs
->req
[tcs_id
- tcs
->offset
] = msg
;
379 set_bit(tcs_id
, drv
->tcs_in_use
);
380 spin_unlock(&drv
->lock
);
382 __tcs_buffer_write(drv
, tcs_id
, 0, msg
);
383 __tcs_trigger(drv
, tcs_id
);
386 spin_unlock_irqrestore(&tcs
->lock
, flags
);
391 * rpmh_rsc_send_data: Validate the incoming message and write to the
392 * appropriate TCS block.
394 * @drv: the controller
395 * @msg: the data to be sent
397 * Return: 0 on success, -EINVAL on error.
398 * Note: This call blocks until a valid data is written to the TCS.
400 int rpmh_rsc_send_data(struct rsc_drv
*drv
, const struct tcs_request
*msg
)
404 if (!msg
|| !msg
->cmds
|| !msg
->num_cmds
||
405 msg
->num_cmds
> MAX_RPMH_PAYLOAD
) {
411 ret
= tcs_write(drv
, msg
);
413 pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
417 } while (ret
== -EBUSY
);
422 static int find_match(const struct tcs_group
*tcs
, const struct tcs_cmd
*cmd
,
427 /* Check for already cached commands */
428 for_each_set_bit(i
, tcs
->slots
, MAX_TCS_SLOTS
) {
429 if (tcs
->cmd_cache
[i
] != cmd
[0].addr
)
431 if (i
+ len
>= tcs
->num_tcs
* tcs
->ncpt
)
433 for (j
= 0; j
< len
; j
++) {
434 if (tcs
->cmd_cache
[i
+ j
] != cmd
[j
].addr
)
443 WARN(1, "Message does not match previous sequence.\n");
447 static int find_slots(struct tcs_group
*tcs
, const struct tcs_request
*msg
,
448 int *tcs_id
, int *cmd_id
)
453 /* Find if we already have the msg in our TCS */
454 slot
= find_match(tcs
, msg
->cmds
, msg
->num_cmds
);
458 /* Do over, until we can fit the full payload in a TCS */
460 slot
= bitmap_find_next_zero_area(tcs
->slots
, MAX_TCS_SLOTS
,
461 i
, msg
->num_cmds
, 0);
462 if (slot
>= tcs
->num_tcs
* tcs
->ncpt
)
465 } while (slot
+ msg
->num_cmds
- 1 >= i
);
468 bitmap_set(tcs
->slots
, slot
, msg
->num_cmds
);
469 /* Copy the addresses of the resources over to the slots */
470 for (i
= 0; i
< msg
->num_cmds
; i
++)
471 tcs
->cmd_cache
[slot
+ i
] = msg
->cmds
[i
].addr
;
473 offset
= slot
/ tcs
->ncpt
;
474 *tcs_id
= offset
+ tcs
->offset
;
475 *cmd_id
= slot
% tcs
->ncpt
;
480 static int tcs_ctrl_write(struct rsc_drv
*drv
, const struct tcs_request
*msg
)
482 struct tcs_group
*tcs
;
483 int tcs_id
= 0, cmd_id
= 0;
487 tcs
= get_tcs_for_msg(drv
, msg
);
491 spin_lock_irqsave(&tcs
->lock
, flags
);
492 /* find the TCS id and the command in the TCS to write to */
493 ret
= find_slots(tcs
, msg
, &tcs_id
, &cmd_id
);
495 __tcs_buffer_write(drv
, tcs_id
, cmd_id
, msg
);
496 spin_unlock_irqrestore(&tcs
->lock
, flags
);
502 * rpmh_rsc_write_ctrl_data: Write request to the controller
504 * @drv: the controller
505 * @msg: the data to be written to the controller
507 * There is no response returned for writing the request to the controller.
509 int rpmh_rsc_write_ctrl_data(struct rsc_drv
*drv
, const struct tcs_request
*msg
)
511 if (!msg
|| !msg
->cmds
|| !msg
->num_cmds
||
512 msg
->num_cmds
> MAX_RPMH_PAYLOAD
) {
513 pr_err("Payload error\n");
517 /* Data sent to this API will not be sent immediately */
518 if (msg
->state
== RPMH_ACTIVE_ONLY_STATE
)
521 return tcs_ctrl_write(drv
, msg
);
524 static int rpmh_probe_tcs_config(struct platform_device
*pdev
,
527 struct tcs_type_config
{
530 } tcs_cfg
[TCS_TYPE_NR
] = { { 0 } };
531 struct device_node
*dn
= pdev
->dev
.of_node
;
532 u32 config
, max_tcs
, ncpt
, offset
;
533 int i
, ret
, n
, st
= 0;
534 struct tcs_group
*tcs
;
535 struct resource
*res
;
537 char drv_id
[10] = {0};
539 snprintf(drv_id
, ARRAY_SIZE(drv_id
), "drv-%d", drv
->id
);
540 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, drv_id
);
541 base
= devm_ioremap_resource(&pdev
->dev
, res
);
543 return PTR_ERR(base
);
545 ret
= of_property_read_u32(dn
, "qcom,tcs-offset", &offset
);
548 drv
->tcs_base
= base
+ offset
;
550 config
= readl_relaxed(base
+ DRV_PRNT_CHLD_CONFIG
);
553 max_tcs
&= DRV_NUM_TCS_MASK
<< (DRV_NUM_TCS_SHIFT
* drv
->id
);
554 max_tcs
= max_tcs
>> (DRV_NUM_TCS_SHIFT
* drv
->id
);
556 ncpt
= config
& (DRV_NCPT_MASK
<< DRV_NCPT_SHIFT
);
557 ncpt
= ncpt
>> DRV_NCPT_SHIFT
;
559 n
= of_property_count_u32_elems(dn
, "qcom,tcs-config");
560 if (n
!= 2 * TCS_TYPE_NR
)
563 for (i
= 0; i
< TCS_TYPE_NR
; i
++) {
564 ret
= of_property_read_u32_index(dn
, "qcom,tcs-config",
565 i
* 2, &tcs_cfg
[i
].type
);
568 if (tcs_cfg
[i
].type
>= TCS_TYPE_NR
)
571 ret
= of_property_read_u32_index(dn
, "qcom,tcs-config",
572 i
* 2 + 1, &tcs_cfg
[i
].n
);
575 if (tcs_cfg
[i
].n
> MAX_TCS_PER_TYPE
)
579 for (i
= 0; i
< TCS_TYPE_NR
; i
++) {
580 tcs
= &drv
->tcs
[tcs_cfg
[i
].type
];
584 tcs
->type
= tcs_cfg
[i
].type
;
585 tcs
->num_tcs
= tcs_cfg
[i
].n
;
587 spin_lock_init(&tcs
->lock
);
589 if (!tcs
->num_tcs
|| tcs
->type
== CONTROL_TCS
)
592 if (st
+ tcs
->num_tcs
> max_tcs
||
593 st
+ tcs
->num_tcs
>= BITS_PER_BYTE
* sizeof(tcs
->mask
))
596 tcs
->mask
= ((1 << tcs
->num_tcs
) - 1) << st
;
601 * Allocate memory to cache sleep and wake requests to
602 * avoid reading TCS register memory.
604 if (tcs
->type
== ACTIVE_TCS
)
607 tcs
->cmd_cache
= devm_kcalloc(&pdev
->dev
,
608 tcs
->num_tcs
* ncpt
, sizeof(u32
),
619 static int rpmh_rsc_probe(struct platform_device
*pdev
)
621 struct device_node
*dn
= pdev
->dev
.of_node
;
626 * Even though RPMh doesn't directly use cmd-db, all of its children
627 * do. To avoid adding this check to our children we'll do it now.
629 ret
= cmd_db_ready();
631 if (ret
!= -EPROBE_DEFER
)
632 dev_err(&pdev
->dev
, "Command DB not available (%d)\n",
637 drv
= devm_kzalloc(&pdev
->dev
, sizeof(*drv
), GFP_KERNEL
);
641 ret
= of_property_read_u32(dn
, "qcom,drv-id", &drv
->id
);
645 drv
->name
= of_get_property(dn
, "label", NULL
);
647 drv
->name
= dev_name(&pdev
->dev
);
649 ret
= rpmh_probe_tcs_config(pdev
, drv
);
653 spin_lock_init(&drv
->lock
);
654 bitmap_zero(drv
->tcs_in_use
, MAX_TCS_NR
);
656 irq
= platform_get_irq(pdev
, drv
->id
);
660 ret
= devm_request_irq(&pdev
->dev
, irq
, tcs_tx_done
,
661 IRQF_TRIGGER_HIGH
| IRQF_NO_SUSPEND
,
666 /* Enable the active TCS to send requests immediately */
667 write_tcs_reg(drv
, RSC_DRV_IRQ_ENABLE
, 0, drv
->tcs
[ACTIVE_TCS
].mask
);
669 spin_lock_init(&drv
->client
.cache_lock
);
670 INIT_LIST_HEAD(&drv
->client
.cache
);
671 INIT_LIST_HEAD(&drv
->client
.batch_cache
);
673 dev_set_drvdata(&pdev
->dev
, drv
);
675 return devm_of_platform_populate(&pdev
->dev
);
678 static const struct of_device_id rpmh_drv_match
[] = {
679 { .compatible
= "qcom,rpmh-rsc", },
683 static struct platform_driver rpmh_driver
= {
684 .probe
= rpmh_rsc_probe
,
687 .of_match_table
= rpmh_drv_match
,
691 static int __init
rpmh_driver_init(void)
693 return platform_driver_register(&rpmh_driver
);
695 arch_initcall(rpmh_driver_init
);