drivers/interconnect/qcom/bcm-voter.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include "bcm-voter.h"
#include "icc-rpmh.h"

static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 * @tcs_wait: mask for which buckets require TCS completion
 */
struct bcm_voter {
        struct device *dev;
        struct device_node *np;
        struct mutex lock;
        struct list_head commit_list;
        struct list_head ws_list;
        struct list_head voter_node;
        u32 tcs_wait;
};

static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b)
{
        const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list);
        const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);

        return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd;
}

static u64 bcm_div(u64 num, u32 base)
{
        /* Ensure that small votes aren't lost. */
        if (num && num < base)
                return 1;

        do_div(num, base);

        return num;
}
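
/*
 * Worked example: bcm_div(100, 16) truncates 100 / 16 down to 6, while
 * bcm_div(3, 16) returns 1 rather than 0, so a small but non-zero
 * bandwidth vote still keeps the resource enabled.
 */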

/* BCMs with enable_mask use one-hot-encoding for on/off signaling */
static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm)
{
        struct qcom_icc_node *node;
        int bucket, i;

        for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
                bcm->vote_x[bucket] = 0;
                bcm->vote_y[bucket] = 0;

                for (i = 0; i < bcm->num_nodes; i++) {
                        node = bcm->nodes[i];

                        /* If any vote in this bucket exists, keep the BCM enabled */
                        if (node->sum_avg[bucket] || node->max_peak[bucket]) {
                                bcm->vote_x[bucket] = 0;
                                bcm->vote_y[bucket] = bcm->enable_mask;
                                break;
                        }
                }
        }

        if (bcm->keepalive) {
                bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
                bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
                bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
                bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
        }
}

static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
        struct qcom_icc_node *node;
        size_t i, bucket;
        u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
        u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
        u64 temp;

        for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
                for (i = 0; i < bcm->num_nodes; i++) {
                        node = bcm->nodes[i];
                        temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
                                       node->buswidth * node->channels);
                        agg_avg[bucket] = max(agg_avg[bucket], temp);

                        temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
                                       node->buswidth);
                        agg_peak[bucket] = max(agg_peak[bucket], temp);
                }

                temp = agg_avg[bucket] * bcm->vote_scale;
                bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);

                temp = agg_peak[bucket] * bcm->vote_scale;
                bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
        }

        if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
            bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
                bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
                bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
                bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
                bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
        }
}
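
/*
 * Put differently, for every bucket the aggregation above reduces to
 * (up to the rounding done by bcm_div()):
 *
 *   vote_x = max over nodes of (sum_avg * width / (buswidth * channels))
 *            * vote_scale / unit
 *   vote_y = max over nodes of (max_peak * width / buswidth)
 *            * vote_scale / unit
 */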

static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
                               u32 addr, bool commit, bool wait)
{
        bool valid = true;

        if (!cmd)
                return;

        memset(cmd, 0, sizeof(*cmd));

        if (vote_x == 0 && vote_y == 0)
                valid = false;

        if (vote_x > BCM_TCS_CMD_VOTE_MASK)
                vote_x = BCM_TCS_CMD_VOTE_MASK;

        if (vote_y > BCM_TCS_CMD_VOTE_MASK)
                vote_y = BCM_TCS_CMD_VOTE_MASK;

        cmd->addr = addr;
        cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

        /*
         * Set the wait-for-completion flag on commands that need to complete
         * before the next command is sent.
         */
        cmd->wait = wait;
}
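
/*
 * Example: tcs_cmd_gen(&cmd, 0, 0, addr, true, true) yields a command with
 * the valid bit cleared (both votes are zero), while any vote larger than
 * BCM_TCS_CMD_VOTE_MASK is clamped to the mask before BCM_TCS_CMD() packs
 * it into cmd->data.
 */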

static void tcs_list_gen(struct bcm_voter *voter, int bucket,
                         struct tcs_cmd tcs_list[MAX_VCD],
                         int n[MAX_VCD + 1])
{
        struct list_head *bcm_list = &voter->commit_list;
        struct qcom_icc_bcm *bcm;
        bool commit, wait;
        size_t idx = 0, batch = 0, cur_vcd_size = 0;

        memset(n, 0, sizeof(int) * (MAX_VCD + 1));

        list_for_each_entry(bcm, bcm_list, list) {
                commit = false;
                cur_vcd_size++;
                if ((list_is_last(&bcm->list, bcm_list)) ||
                    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
                        commit = true;
                        cur_vcd_size = 0;
                }

                wait = commit && (voter->tcs_wait & BIT(bucket));

                tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
                            bcm->vote_y[bucket], bcm->addr, commit, wait);
                idx++;
                n[batch]++;

                /*
                 * Batch the BCMs in such a way that we do not split them
                 * across multiple payloads when they are under the same VCD.
                 * This is to ensure that every BCM is committed, since we only
                 * set the commit bit on the last BCM request of every VCD.
                 */
                if (n[batch] >= MAX_RPMH_PAYLOAD) {
                        if (!commit) {
                                n[batch] -= cur_vcd_size;
                                n[batch + 1] = cur_vcd_size;
                        }
                        batch++;
                }
        }
}
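
/*
 * For instance, with a commit list sorted as {A, B} in VCD 0 followed by
 * {C} in VCD 1, the commit bit is set on B and on C (the last BCM of each
 * VCD), and all three commands end up in n[0] as long as the batch stays
 * below MAX_RPMH_PAYLOAD.
 */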

/**
 * of_bcm_voter_get - gets a bcm voter handle from DT node
 * @dev: device pointer for the consumer device
 * @name: name for the bcm voter device
 *
 * This function will match a device_node pointer for the phandle
 * specified in the device DT and return a bcm_voter handle on success.
 *
 * Returns bcm_voter pointer or ERR_PTR() on error. -EPROBE_DEFER is returned
 * when the matching bcm voter is yet to be found.
 */
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
        struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
        struct bcm_voter *temp;
        struct device_node *np, *node;
        int idx = 0;

        if (!dev || !dev->of_node)
                return ERR_PTR(-ENODEV);

        np = dev->of_node;

        if (name) {
                idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
                if (idx < 0)
                        return ERR_PTR(idx);
        }

        node = of_parse_phandle(np, "qcom,bcm-voters", idx);

        mutex_lock(&bcm_voter_lock);
        list_for_each_entry(temp, &bcm_voters, voter_node) {
                if (temp->np == node) {
                        voter = temp;
                        break;
                }
        }
        mutex_unlock(&bcm_voter_lock);

        of_node_put(node);
        return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
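
/*
 * Usage sketch (hypothetical consumer; assumes a device node that carries
 * the "qcom,bcm-voters" phandle list and, optionally, "qcom,bcm-voter-names"):
 *
 *        voter = of_bcm_voter_get(dev, NULL);
 *        if (IS_ERR(voter))
 *                return PTR_ERR(voter);  // may be -EPROBE_DEFER until the
 *                                        // referenced voter has probed
 */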

/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake/sleep lists
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
        if (!voter)
                return;

        mutex_lock(&voter->lock);
        if (list_empty(&bcm->list))
                list_add_tail(&bcm->list, &voter->commit_list);

        if (list_empty(&bcm->ws_list))
                list_add_tail(&bcm->ws_list, &voter->ws_list);

        mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);

/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes them to the BCM
 * device associated with the voter. It conditionally generates WAKE and SLEEP
 * commands based on deltas between the WAKE/SLEEP requirements. The ws_list
 * persists across multiple commit requests, and bcm nodes are removed from it
 * only when their WAKE requirements match their SLEEP requirements.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
        struct qcom_icc_bcm *bcm;
        struct qcom_icc_bcm *bcm_tmp;
        int commit_idx[MAX_VCD + 1];
        struct tcs_cmd cmds[MAX_BCMS];
        int ret = 0;

        if (!voter)
                return 0;

        mutex_lock(&voter->lock);
        list_for_each_entry(bcm, &voter->commit_list, list) {
                if (bcm->enable_mask)
                        bcm_aggregate_mask(bcm);
                else
                        bcm_aggregate(bcm);
        }

        /*
         * Pre-sort the BCMs based on VCD for ease of generating a command list
         * that groups the BCMs with the same VCD together. VCDs are numbered
         * with the lowest being the most expensive time-wise, ensuring that
         * those commands are sent earliest in the queue. This needs to be
         * sorted on every commit since we can't guarantee the order in which
         * the BCMs are added to the list.
         */
        list_sort(NULL, &voter->commit_list, cmp_vcd);

        /*
         * Construct the command list based on a pre-ordered list of BCMs
         * based on VCD.
         */
        tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
        if (!commit_idx[0])
                goto out;

        rpmh_invalidate(voter->dev);

        ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
                               cmds, commit_idx);
        if (ret) {
                pr_err("Error sending AMC RPMH requests (%d)\n", ret);
                goto out;
        }

        list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
                list_del_init(&bcm->list);

        list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
                /*
                 * Only generate WAKE and SLEEP commands if a resource's
                 * requirements change as the execution environment transitions
                 * between different power states.
                 */
                if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
                    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
                    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
                    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
                        list_add_tail(&bcm->list, &voter->commit_list);
                else
                        list_del_init(&bcm->ws_list);
        }

        if (list_empty(&voter->commit_list))
                goto out;

        list_sort(NULL, &voter->commit_list, cmp_vcd);

        tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

        ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
        if (ret) {
                pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
                goto out;
        }

        tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

        ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
        if (ret) {
                pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
                goto out;
        }

out:
        list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
                list_del_init(&bcm->list);

        mutex_unlock(&voter->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
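
/*
 * Typical flow, as a sketch of how a provider (e.g. the icc-rpmh code) is
 * expected to drive this interface; the qp/qn names below are illustrative:
 *
 *        for (i = 0; i < qn->num_bcms; i++)
 *                qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
 *
 *        return qcom_icc_bcm_voter_commit(qp->voter);
 *
 * All BCMs touched by a bandwidth request are queued first, then the whole
 * set is aggregated, sorted by VCD and flushed to RPMh in one commit.
 */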

static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct bcm_voter *voter;

        voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
        if (!voter)
                return -ENOMEM;

        voter->dev = &pdev->dev;
        voter->np = np;

        if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
                voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;

        mutex_init(&voter->lock);
        INIT_LIST_HEAD(&voter->commit_list);
        INIT_LIST_HEAD(&voter->ws_list);

        mutex_lock(&bcm_voter_lock);
        list_add_tail(&voter->voter_node, &bcm_voters);
        mutex_unlock(&bcm_voter_lock);

        return 0;
}
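
/*
 * Devicetree sketch (illustrative; the qcom,bcm-voter binding is the
 * authoritative schema):
 *
 *        apps_bcm_voter: bcm-voter {
 *                compatible = "qcom,bcm-voter";
 *        };
 *
 * An optional "qcom,tcs-wait" property selects which buckets wait for TCS
 * completion; without it the driver defaults to QCOM_ICC_TAG_ACTIVE_ONLY.
 */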

static const struct of_device_id bcm_voter_of_match[] = {
        { .compatible = "qcom,bcm-voter" },
        { }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);

static struct platform_driver qcom_icc_bcm_voter_driver = {
        .probe = qcom_icc_bcm_voter_probe,
        .driver = {
                .name = "bcm_voter",
                .of_match_table = bcm_voter_of_match,
        },
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");