// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET		672
#define RSC_DRV_CMD_OFFSET		20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG		0x0C
#define DRV_NUM_TCS_MASK		0x3F
#define DRV_NUM_TCS_SHIFT		6
#define DRV_NCPT_MASK			0x1F
#define DRV_NCPT_SHIFT			27

/* Register offsets */
#define RSC_DRV_IRQ_ENABLE		0x00
#define RSC_DRV_IRQ_STATUS		0x04
#define RSC_DRV_IRQ_CLEAR		0x08
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18
#define RSC_DRV_CMD_ENABLE		0x1C
#define RSC_DRV_CMD_MSGID		0x30
#define RSC_DRV_CMD_ADDR		0x34
#define RSC_DRV_CMD_DATA		0x38
#define RSC_DRV_CMD_STATUS		0x3C
#define RSC_DRV_CMD_RESP_DATA		0x40

#define TCS_AMC_MODE_ENABLE		BIT(16)
#define TCS_AMC_MODE_TRIGGER		BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN			8
#define CMD_MSGID_RESP_REQ		BIT(8)
#define CMD_MSGID_WRITE			BIT(16)
#define CMD_STATUS_ISSUED		BIT(8)
#define CMD_STATUS_COMPL		BIT(16)

static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
			     RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
			  u32 data)
{
	writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
		       RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
{
	writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
}

static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
	/* Poll until the posted write is visible in the register */
	for (;;) {
		if (data == readl(drv->tcs_base + reg +
				  RSC_DRV_TCS_OFFSET * tcs_id))
			break;
		udelay(1);
	}
}

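/*
 * A TCS counts as free only when the driver has not claimed it (its bit in
 * drv->tcs_in_use is clear) and the hardware reports it idle, i.e. its
 * RSC_DRV_STATUS register reads back non-zero.
 */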
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
	return !test_bit(tcs_id, drv->tcs_in_use) &&
	       read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
}

static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
{
	return &drv->tcs[type];
}

static int tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs;

	tcs = get_tcs_of_type(drv, type);

	spin_lock(&tcs->lock);
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
		spin_unlock(&tcs->lock);
		return 0;
	}

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		if (!tcs_is_free(drv, m)) {
			spin_unlock(&tcs->lock);
			return -EAGAIN;
		}
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
	}
	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
	spin_unlock(&tcs->lock);

	return 0;
}

/**
 * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
 *
 * @drv: the RSC controller
 */
int rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	int ret;

	ret = tcs_invalidate(drv, SLEEP_TCS);
	if (!ret)
		ret = tcs_invalidate(drv, WAKE_TCS);

	return ret;
}

static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type, ret;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes.
	 * NOTE: The driver must be aware that this RSC does not have a
	 * dedicated AMC, and therefore would invalidate the sleep and wake
	 * TCSes before making an active state request.
	 */
	tcs = get_tcs_of_type(drv, type);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
		tcs = get_tcs_of_type(drv, WAKE_TCS);
		if (tcs->num_tcs) {
			ret = rpmh_rsc_invalidate(drv);
			if (ret)
				return ERR_PTR(ret);
		}
	}

	return tcs;
}

static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * tcs_tx_done: TX Done interrupt handler
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
			if (!(sts & CMD_STATUS_ISSUED) ||
			   ((req->wait_for_compl || cmd->wait) &&
			   !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		spin_unlock(&drv->lock);
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}

static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid, cmd_msgid;
	u32 cmd_enable = 0;
	u32 cmd_complete;
	struct tcs_cmd *cmd;
	int i, j;

	cmd_msgid = CMD_MSGID_LEN;
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
	cmd_msgid |= CMD_MSGID_WRITE;

	cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		cmd_complete |= cmd->wait << j;
		msgid = cmd_msgid;
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}

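/*
 * Worked example for __tcs_buffer_write() (illustrative, not from the
 * original source): a two-command message written at cmd_id 0, where only
 * cmds[1] sets ->wait, programs the MSGID/ADDR/DATA registers of slots 0
 * and 1, accumulates cmd_enable = BIT(0) | BIT(1), ORs BIT(1) into
 * cmd_complete, and adds CMD_MSGID_RESP_REQ to slot 1's MSGID only. The
 * final three writes then commit the completion mask and arm both slots
 * through RSC_DRV_CMD_ENABLE.
 */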
static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
{
	u32 enable;

	/*
	 * HW req: clear DRV_CONTROL and enable the TCS again.
	 * While clearing, ensure that the AMC mode trigger is cleared first
	 * and the mode enable is cleared after it.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	/* Enable the AMC mode on the TCS and then trigger the TCS */
	enable = TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable |= TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
}

static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int i, j, k;
	int tcs_id = tcs->offset;

	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
		if (tcs_is_free(drv, tcs_id))
			continue;

		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

static int find_free_tcs(struct tcs_group *tcs)
{
	int i;

	for (i = 0; i < tcs->num_tcs; i++) {
		if (tcs_is_free(tcs->drv, tcs->offset + i))
			return tcs->offset + i;
	}

	return -EBUSY;
}

static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&tcs->lock, flags);
	spin_lock(&drv->lock);
	/*
	 * The h/w does not like it if we send a request to the same address
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret) {
		spin_unlock(&drv->lock);
		goto done_write;
	}

	tcs_id = find_free_tcs(tcs);
	if (tcs_id < 0) {
		ret = tcs_id;
		spin_unlock(&drv->lock);
		goto done_write;
	}

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	spin_unlock(&drv->lock);

	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_trigger(drv, tcs_id);

done_write:
	spin_unlock_irqrestore(&tcs->lock, flags);
	return ret;
}

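/*
 * Lock ordering note: tcs_write() takes tcs->lock (with IRQs disabled)
 * before nesting drv->lock inside it, while tcs_tx_done() takes only
 * drv->lock. New code must keep this order; acquiring tcs->lock while
 * holding drv->lock would risk a deadlock.
 */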
/**
 * rpmh_rsc_send_data: Validate the incoming message and write to the
 * appropriate TCS block.
 *
 * @drv: the controller
 * @msg: the data to be sent
 *
 * Return: 0 on success, -EINVAL on error.
 * Note: This call blocks until the data is written to a TCS.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	int ret;

	if (!msg || !msg->cmds || !msg->num_cmds ||
	    msg->num_cmds > MAX_RPMH_PAYLOAD) {
		WARN_ON(1);
		return -EINVAL;
	}

	do {
		ret = tcs_write(drv, msg);
		if (ret == -EBUSY) {
			pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
					    msg->cmds[0].addr);
			udelay(10);
		}
	} while (ret == -EBUSY);

	return ret;
}

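/*
 * Usage sketch (illustrative only; the address and data below are made-up
 * placeholders, not real command-db entries). Clients normally reach this
 * through the rpmh.c front end rather than calling it directly:
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30000,
 *		.data = 0x1,
 *		.wait = true,
 *	};
 *	struct tcs_request req = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *	int ret = rpmh_rsc_send_data(drv, &req);
 */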
static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
		      int len)
{
	int i, j;

	/* Check for already cached commands */
	for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
		if (tcs->cmd_cache[i] != cmd[0].addr)
			continue;
		if (i + len >= tcs->num_tcs * tcs->ncpt)
			goto seq_err;
		for (j = 0; j < len; j++) {
			if (tcs->cmd_cache[i + j] != cmd[j].addr)
				goto seq_err;
		}
		return i;
	}

	return -ENODATA;

seq_err:
	WARN(1, "Message does not match previous sequence.\n");
	return -EINVAL;
}

static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Find if we already have the msg in our TCS */
	slot = find_match(tcs, msg->cmds, msg->num_cmds);
	if (slot >= 0)
		goto copy_data;

	/* Do over, until we can fit the full payload in a TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot == tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

copy_data:
	bitmap_set(tcs->slots, slot, msg->num_cmds);
	/* Copy the addresses of the resources over to the slots */
	for (i = 0; i < msg->num_cmds; i++)
		tcs->cmd_cache[slot + i] = msg->cmds[i].addr;

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}

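/*
 * Example of the slot arithmetic in find_slots() (assuming ncpt = 3): a
 * payload placed at slot 7 yields offset = 7 / 3 = 2, so *tcs_id is the
 * group's third TCS (tcs->offset + 2) and *cmd_id = 7 % 3 = 1, the second
 * command slot within that TCS.
 */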
static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	unsigned long flags;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&tcs->lock, flags);
	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
	spin_unlock_irqrestore(&tcs->lock, flags);

	return ret;
}

/**
 * rpmh_rsc_write_ctrl_data: Write request to the controller
 *
 * @drv: the controller
 * @msg: the data to be written to the controller
 *
 * There is no response returned for writing the request to the controller.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	if (!msg || !msg->cmds || !msg->num_cmds ||
	    msg->num_cmds > MAX_RPMH_PAYLOAD) {
		pr_err("Payload error\n");
		return -EINVAL;
	}

	/* Data sent to this API will not be sent immediately */
	if (msg->state == RPMH_ACTIVE_ONLY_STATE)
		return -EINVAL;

	return tcs_ctrl_write(drv, msg);
}

static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;
	struct resource *res;
	void __iomem *base;
	char drv_id[10] = {0};

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

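	/*
	 * Equivalent restatement of the two decodes above:
	 *   max_tcs = (config >> (DRV_NUM_TCS_SHIFT * drv->id)) & DRV_NUM_TCS_MASK;
	 *   ncpt    = (config >> DRV_NCPT_SHIFT) & DRV_NCPT_MASK;
	 */
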
	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;
		spin_lock_init(&tcs->lock);

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;

		/*
		 * Allocate memory to cache sleep and wake requests to
		 * avoid reading TCS register memory.
		 */
		if (tcs->type == ACTIVE_TCS)
			continue;

		tcs->cmd_cache = devm_kcalloc(&pdev->dev,
					      tcs->num_tcs * ncpt, sizeof(u32),
					      GFP_KERNEL);
		if (!tcs->cmd_cache)
			return -ENOMEM;
	}

	drv->num_tcs = st;

	return 0;
}

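/*
 * Illustrative device-tree fragment for the properties parsed above (the
 * values are modeled on a typical apps RSC and are an example, not a
 * binding reference; the memory regions are named "drv-%d" to match the
 * platform_get_resource_byname() lookup):
 *
 *	apps_rsc: rsc@179c0000 {
 *		compatible = "qcom,rpmh-rsc";
 *		label = "apps_rsc";
 *		reg-names = "drv-0", "drv-1", "drv-2";
 *		qcom,drv-id = <2>;
 *		qcom,tcs-offset = <0xd00>;
 *		qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
 *				  <WAKE_TCS 3>, <CONTROL_TCS 1>;
 *	};
 */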
static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	int ret, irq;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	ret = rpmh_probe_tcs_config(pdev, drv);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/* Enable the active TCS to send requests immediately */
	write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		  .name = "rpmh",
		  .of_match_table = rpmh_drv_match,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);