// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static DEFINE_MUTEX(icc_bw_lock);
static struct dentry *icc_debugfs_dir;

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-42s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                  tag          avg         peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				u32 avg_bw = 0, peak_bw = 0;

				if (!r->dev)
					continue;

				if (r->enabled) {
					avg_bw = r->avg_bw;
					peak_bw = r->peak_bw;
				}

				seq_printf(s, "  %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static void icc_graph_show_link(struct seq_file *s, int level,
				struct icc_node *n, struct icc_node *m)
{
	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
		   level == 2 ? "\t\t" : "\t",
		   n->id, n->name, m->id, m->name);
}

static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");
}

static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		seq_printf(s, "\t\tlabel = \"%s\"\n",
			   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);
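
/*
 * Usage note (not a kernel API): the "interconnect_graph" debugfs file above
 * emits Graphviz DOT text, so a snapshot of the topology can be rendered
 * offline, for example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/interconnect/interconnect_graph | dot -Tsvg > icc.svg
 */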

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

static struct icc_node *node_find_by_name(const char *name)
{
	struct icc_provider *provider;
	struct icc_node *n;

	list_for_each_entry(provider, &icc_providers, provider_list) {
		list_for_each_entry(n, &provider->nodes, node_list) {
			if (!strcmp(n->name, name))
				return n;
		}
	}

	return NULL;
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	mutex_lock(&icc_bw_lock);

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	mutex_unlock(&icc_bw_lock);

	return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}
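
/*
 * Worked example of the aggregation above (illustrative only), assuming the
 * provider uses icc_std_aggregate() below, i.e. sum of averages and max of
 * peaks: with two enabled requests on the same node, (avg 100, peak 200) and
 * (avg 300, peak 250) kBps, the node ends up with avg_bw = 400 kBps and
 * peak_bw = 250 kBps.
 */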

static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);

/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
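
/*
 * Illustrative sketch, not part of the framework: a provider driver with a
 * single DT node can back its ->xlate with of_icc_xlate_onecell() by handing
 * it an icc_onecell_data table. The function and variable names below are
 * hypothetical; the function is __maybe_unused because nothing in this file
 * calls it.
 */
static int __maybe_unused icc_example_setup_onecell(struct device *dev,
						    struct icc_provider *provider,
						    unsigned int num_nodes)
{
	struct icc_onecell_data *data;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num_nodes = num_nodes;
	/* data->nodes[i] would be filled in as the driver creates its nodes */

	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	return 0;
}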

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (!node)
		return ERR_PTR(-EINVAL);

	if (IS_ERR(node))
		return ERR_CAST(node);

	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);

static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}

struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
	struct icc_path **ptr, *path;

	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	path = of_icc_get(dev, name);
	if (!IS_ERR(path)) {
		*ptr = path;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);

/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_property_present(np, "interconnects"))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);
	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);
	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct device_node *np;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_property_present(np, "interconnects"))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);

/**
 * icc_get() - get a path handle between two endpoints
 * @dev: device pointer for the consumer device
 * @src: source node name
 * @dst: destination node name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const char *src, const char *dst)
{
	struct icc_node *src_node, *dst_node;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src_node = node_find_by_name(src);
	if (!src_node) {
		dev_err(dev, "%s: invalid src=%s\n", __func__, src);
		goto out;
	}

	dst_node = node_find_by_name(dst);
	if (!dst_node) {
		dev_err(dev, "%s: invalid dst=%s\n", __func__, dst);
		goto out;
	}

	path = path_find(dev, src_node, dst_node);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}

/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);

/**
 * icc_get_name() - Get name of the icc path
 * @path: interconnect path
 *
 * This function is used by an interconnect consumer to get the name of the icc
 * path.
 *
 * Returns a valid pointer on success, or NULL otherwise.
 */
const char *icc_get_name(struct icc_path *path)
{
	if (!path)
		return NULL;

	return path->name;
}
EXPORT_SYMBOL_GPL(icc_get_name);

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: interconnect path
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret = 0;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_bw_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_bw_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
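
/*
 * Illustrative consumer sketch, not part of the framework: shows the typical
 * of_icc_get()/icc_set_bw()/icc_put() sequence. The function and the
 * "cpu-mem" path name are hypothetical, and the function is __maybe_unused
 * because nothing in this file calls it.
 */
static int __maybe_unused icc_example_consumer(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "cpu-mem");	/* hypothetical path name */
	if (IS_ERR(path))
		return PTR_ERR(path);
	if (!path)
		return 0;	/* no "interconnects" DT property, nothing to do */

	/* request 100 MB/s average and 200 MB/s peak (converted to kBps) */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		dev_err(dev, "icc_set_bw failed: %d\n", ret);

	icc_put(path);

	return ret;
}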

static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;

	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}

int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}

	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	if (!node)
		return;

	kfree(node->links);
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: source node id
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);
		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (node->avg_bw || node->peak_bw) {
		if (provider->pre_aggregate)
			provider->pre_aggregate(node);

		if (provider->aggregate)
			provider->aggregate(node, 0, node->init_avg, node->init_peak,
					    &node->avg_bw, &node->peak_bw);

		provider->set(node, node);
	}

	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);

/**
 * icc_provider_init() - initialize a new interconnect provider
 * @provider: the interconnect provider to initialize
 *
 * Must be called before adding nodes to the provider.
 */
void icc_provider_init(struct icc_provider *provider)
{
	WARN_ON(!provider->set);

	INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);

/**
 * icc_provider_register() - register a new interconnect provider
 * @provider: the interconnect provider to register
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_register(struct icc_provider *provider)
{
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);
	list_add_tail(&provider->provider_list, &icc_providers);
	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider registered\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);
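
/*
 * Illustrative provider bring-up sketch, not part of the framework: a
 * provider driver typically initializes the provider (with ->set and friends
 * already filled in), creates and adds its nodes, links them, and only then
 * registers the provider. The function, node name and IDs are hypothetical;
 * __maybe_unused because nothing in this file calls it.
 */
static int __maybe_unused icc_example_register_provider(struct icc_provider *provider)
{
	struct icc_node *node;
	int ret;

	icc_provider_init(provider);

	node = icc_node_create(1);		/* hypothetical master id */
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "example_master";
	icc_node_add(node, provider);

	ret = icc_link_create(node, 2);		/* hypothetical slave id */
	if (ret)
		goto err;

	ret = icc_provider_register(provider);
	if (ret)
		goto err;

	return 0;

err:
	icc_nodes_remove(provider);
	return ret;
}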

/**
 * icc_provider_deregister() - deregister an interconnect provider
 * @provider: the interconnect provider to deregister
 */
void icc_provider_deregister(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	WARN_ON(provider->users);

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);

static const struct of_device_id __maybe_unused ignore_list[] = {
	{ .compatible = "qcom,sc7180-ipa-virt" },
	{ .compatible = "qcom,sc8180x-ipa-virt" },
	{ .compatible = "qcom,sdx55-ipa-virt" },
	{ .compatible = "qcom,sm8150-ipa-virt" },
	{ .compatible = "qcom,sm8250-ipa-virt" },
	{}
};

static int of_count_icc_providers(struct device_node *np)
{
	struct device_node *child;
	int count = 0;

	for_each_available_child_of_node(np, child) {
		if (of_property_present(child, "#interconnect-cells") &&
		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

	return count;
}

void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	mutex_lock(&icc_bw_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_bw_lock);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
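
/*
 * Usage note (illustrative, driver names hypothetical): providers that want
 * the boot-time bandwidth floor dropped once all their consumers have probed
 * typically hook icc_sync_state() into the driver core's sync_state callback,
 * e.g. in their platform_driver:
 *
 *	static struct platform_driver foo_icc_driver = {
 *		.probe = foo_icc_probe,
 *		.driver = {
 *			.name = "foo-interconnect",
 *			.sync_state = icc_sync_state,
 *		},
 *	};
 */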

static int __init icc_init(void)
{
	struct device_node *root;

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&icc_bw_lock);
	fs_reclaim_release(GFP_KERNEL);

	root = of_find_node_by_path("/");

	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);

	icc_debugfs_client_init(icc_debugfs_dir);

	return 0;
}

device_initcall(icc_init);