// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3
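
/*
 * Reading the masks above: DFLT_PRIO lives in bits [6:4] and URGFWD_EN in
 * bit 3 of the per-port MCTL_LOW register, with one 0x1000-sized register
 * window per QoS port.
 */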

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1
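
/*
 * Per the macros above, each BIMC master port owns a 0x4000-sized BKE
 * window starting at 0x300: M_BKE_EN sits at the window base and the four
 * M_BKE_HEALTH_n configuration registers follow at 0x40 + 0x4 * n.
 */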

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

#define NOC_QOS_MODE_FIXED_VAL		0x0
#define NOC_QOS_MODE_BYPASS_VAL		0x2

#define ICC_BUS_CLK_MIN_RATE		19200ULL /* kHz */
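
/* 19200 kHz corresponds to the 19.2 MHz XO, a sane floor for keep-alive votes */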

static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				QNOC_QOS_MCTL_DFLT_PRIO_MASK,
				qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				  QNOC_QOS_MCTL_URGFWD_EN_MASK,
				  !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/* QoS Priority: The QoS Health parameters are only considered
	 * when we are NOT in Bypass Mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

static int qcom_icc_set_noc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS_VAL;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
		mode = NOC_QOS_MODE_FIXED_VAL;
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
		mode = NOC_QOS_MODE_BYPASS_VAL;
	} else {
		/* How did we get here? */
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

static int qcom_icc_qos_set(struct icc_node *node)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node);
	default:
		return qcom_icc_set_noc_qos(node);
	}
}

static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
	int ret, rpm_ctx = 0;
	u64 bw_bps;

	if (qn->qos.ap_owned)
		return 0;

	for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
		bw_bps = icc_units_to_bps(bw[rpm_ctx]);

		if (qn->mas_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_MASTER_REQ,
						    qn->mas_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
				       qn->mas_rpm_id, ret);
				return ret;
			}
		}

		if (qn->slv_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_SLAVE_REQ,
						    qn->slv_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
				       qn->slv_rpm_id, ret);
				return ret;
			}
		}
	}

	return 0;
}
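
/*
 * Note: the interconnect core aggregates bandwidth in kB/s while RPM
 * expects B/s, hence the icc_units_to_bps() conversion above. A node may
 * vote as a master (mas_rpm_id), a slave (slv_rpm_id) or both; -1 marks
 * the respective ID as absent.
 */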

/**
 * qcom_icc_pre_bw_aggregate - clean up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = RPM_ALWAYS_TAG;

	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}
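
/*
 * For reference: an untagged request is promoted to RPM_ALWAYS_TAG, which
 * is expected to cover both state bits, so such votes land in the active
 * and the sleep bucket alike.
 */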

static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
{
	u64 agg_avg_rate, agg_peak_rate, agg_rate;

	if (qn->channels)
		agg_avg_rate = div_u64(qn->sum_avg[ctx], qn->channels);
	else
		agg_avg_rate = qn->sum_avg[ctx];

	if (qn->ab_coeff) {
		agg_avg_rate = agg_avg_rate * qn->ab_coeff;
		agg_avg_rate = div_u64(agg_avg_rate, 100);
	}

	if (qn->ib_coeff) {
		agg_peak_rate = qn->max_peak[ctx] * 100;
		agg_peak_rate = div_u64(agg_peak_rate, qn->ib_coeff);
	} else {
		agg_peak_rate = qn->max_peak[ctx];
	}

	agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);

	return div_u64(agg_rate, qn->buswidth);
}
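
/*
 * Illustrative numbers (not from any real SoC): sum_avg = 1000000 kB/s
 * over channels = 2 with ab_coeff = 105 gives 1000000 / 2 * 105 / 100 =
 * 525000; max_peak = 800000 kB/s with ib_coeff = 200 gives 800000 * 100 /
 * 200 = 400000. max(525000, 400000) / buswidth 8 = 65625 kHz of bus clock.
 */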

/**
 * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_clk_rate: array containing the aggregated clock rates in kHz
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn;
	struct icc_node *node;
	int ctx;

	/*
	 * Iterate nodes on the provider, aggregate bandwidth requests for
	 * every bucket and convert them into bus clock rates.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (ctx = 0; ctx < QCOM_SMD_RPM_STATE_NUM; ctx++) {
			agg_clk_rate[ctx] = max_t(u64, agg_clk_rate[ctx],
						  qcom_icc_calc_rate(qp, qn, ctx));
		}
	}
}
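
/*
 * qcom_icc_set() below commits the aggregated state: per-node RPM
 * bandwidth votes first, then the provider-wide bus clock (via the CCF or
 * as RPM active/sleep rate votes) and finally any node-specific clock.
 */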
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
	struct icc_provider *provider;
	struct qcom_icc_provider *qp;
	u64 active_rate, sleep_rate;
	int ret;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;

	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_clk_rate);
	active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
	sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];

	ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
	if (ret)
		return ret;

	if (dst_qn) {
		ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg);
		if (ret)
			return ret;
	}

	/* Some providers don't have a bus clock to scale */
	if (!qp->bus_clk_desc && !qp->bus_clk)
		return 0;

	/*
	 * Downstream checks whether the requested rate is zero, but it makes little sense
	 * to vote for a value that's below the lower threshold, so let's not do so.
	 */
	if (qp->keep_alive)
		active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);

	/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
	if (qp->bus_clk) {
		active_rate = max_t(u64, active_rate, sleep_rate);
		/* ARM32 caps clk_set_rate arg to u32.. Nothing we can do about that! */
		active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
		return clk_set_rate(qp->bus_clk, active_rate);
	}

	/* RPM only accepts <=INT_MAX rates */
	active_rate = min_t(u64, active_rate, INT_MAX);
	sleep_rate = min_t(u64, sleep_rate, INT_MAX);

	if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	/* Handle the node-specific clock */
	if (!src_qn->bus_clk_desc)
		return 0;

	active_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_ACTIVE_STATE);
	sleep_rate = qcom_icc_calc_rate(qp, src_qn, QCOM_SMD_RPM_SLEEP_STATE);

	if (active_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	return 0;
}
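
/**
 * qnoc_probe - shared probe handler for RPM-era NoC/BIMC/QNoC providers
 * @pdev: platform device of the interconnect provider
 *
 * Waits for the RPM SMD proxy, sets up the optional regmap and bus/interface
 * clocks, registers every node described by the matched qcom_icc_desc and
 * populates child NoC devices, if any.
 *
 * Return: 0 on success, -EPROBE_DEFER while RPM is unavailable or a
 * negative errno on failure.
 */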
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds = NULL;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_intf_clocks) {
		cds = desc->intf_clocks;
		cd_num = desc->num_intf_clocks;
	} else {
		/* 0 intf clocks is perfectly fine */
		cd_num = 0;
	}

	qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
	if (!qp->intf_clks)
		return -ENOMEM;

	if (desc->bus_clk_desc) {
		qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
						GFP_KERNEL);
		if (!qp->bus_clk_desc)
			return -ENOMEM;

		qp->bus_clk_desc = desc->bus_clk_desc;
	} else {
		/* Some older SoCs may have a single non-RPM-owned bus clock. */
		qp->bus_clk = devm_clk_get_optional(dev, "bus");
		if (IS_ERR(qp->bus_clk))
			return PTR_ERR(qp->bus_clk);
	}

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	qp->num_intf_clks = cd_num;
	for (i = 0; i < cd_num; i++)
		qp->intf_clks[i].id = cds[i];

	qp->keep_alive = desc->keep_alive;
	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;
	qp->ab_coeff = desc->ab_coeff;
	qp->ib_coeff = desc->ib_coeff;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = clk_prepare_enable(qp->bus_clk);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	/* If this fails, bus accesses will crash the platform! */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		if (!qnodes[i]->ab_coeff)
			qnodes[i]->ab_coeff = qp->ab_coeff;

		if (!qnodes[i]->ib_coeff)
			qnodes[i]->ib_coeff = qp->ib_coeff;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			clk_bulk_disable_unprepare(qp->num_intf_clks,
						   qp->intf_clks);
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		/* Set QoS registers (we only need to do it once, generally) */
		if (qnodes[i]->qos.ap_owned &&
		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
			ret = qcom_icc_qos_set(node);
			if (ret) {
				clk_bulk_disable_unprepare(qp->num_intf_clks,
							   qp->intf_clks);
				goto err_remove_nodes;
			}
		}

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
err_disable_unprepare_clk:
	clk_disable_unprepare(qp->bus_clk);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);
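
/**
 * qnoc_remove - unwind qnoc_probe
 * @pdev: platform device of the interconnect provider
 */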
void qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_disable_unprepare(qp->bus_clk);
}
EXPORT_SYMBOL(qnoc_remove);