riscv: cacheinfo: report the RISC-V cache hierarchy (arch/riscv/kernel/cacheinfo.c)
[drm/drm-misc.git] / arch / riscv / kernel / cacheinfo.c
blobb320b1d9aa01eff8cb41d4971f7f6dc461cce341
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2017 SiFive
4 */
6 #include <linux/acpi.h>
7 #include <linux/cpu.h>
8 #include <linux/of.h>
9 #include <asm/cacheinfo.h>
/* Optional platform hooks for exposing extra per-leaf sysfs attributes. */
static struct riscv_cacheinfo_ops *rv_cache_ops;

/*
 * riscv_set_cacheinfo_ops() - install platform-specific cacheinfo hooks.
 * @ops: hook table; later lookups go through this pointer.
 *
 * Callers own @ops; it must stay valid for as long as it is registered.
 */
void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
19 const struct attribute_group *
20 cache_get_priv_group(struct cacheinfo *this_leaf)
22 if (rv_cache_ops && rv_cache_ops->get_priv_group)
23 return rv_cache_ops->get_priv_group(this_leaf);
24 return NULL;
27 static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
30 * Using raw_smp_processor_id() elides a preemptability check, but this
31 * is really indicative of a larger problem: the cacheinfo UABI assumes
32 * that cores have a homonogenous view of the cache hierarchy. That
33 * happens to be the case for the current set of RISC-V systems, but
34 * likely won't be true in general. Since there's no way to provide
35 * correct information for these systems via the current UABI we're
36 * just eliding the check for now.
38 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
39 struct cacheinfo *this_leaf;
40 int index;
42 for (index = 0; index < this_cpu_ci->num_leaves; index++) {
43 this_leaf = this_cpu_ci->info_list + index;
44 if (this_leaf->level == level && this_leaf->type == type)
45 return this_leaf;
48 return NULL;
51 uintptr_t get_cache_size(u32 level, enum cache_type type)
53 struct cacheinfo *this_leaf = get_cacheinfo(level, type);
55 return this_leaf ? this_leaf->size : 0;
58 uintptr_t get_cache_geometry(u32 level, enum cache_type type)
60 struct cacheinfo *this_leaf = get_cacheinfo(level, type);
62 return this_leaf ? (this_leaf->ways_of_associativity << 16 |
63 this_leaf->coherency_line_size) :
67 static void ci_leaf_init(struct cacheinfo *this_leaf,
68 enum cache_type type, unsigned int level)
70 this_leaf->level = level;
71 this_leaf->type = type;
/*
 * init_cache_level() - arch hook: derive levels/leaves counts for @cpu.
 *
 * Defers entirely to the generic devicetree walker. Returns 0 on success
 * or a negative errno from init_of_cache_level().
 */
int init_cache_level(unsigned int cpu)
{
	return init_of_cache_level(cpu);
}
79 int populate_cache_leaves(unsigned int cpu)
81 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
82 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
83 struct device_node *np = of_cpu_device_node_get(cpu);
84 struct device_node *prev = NULL;
85 int levels = 1, level = 1;
87 if (!acpi_disabled) {
88 int ret, fw_levels, split_levels;
90 ret = acpi_get_cache_info(cpu, &fw_levels, &split_levels);
91 if (ret)
92 return ret;
94 BUG_ON((split_levels > fw_levels) ||
95 (split_levels + fw_levels > this_cpu_ci->num_leaves));
97 for (; level <= this_cpu_ci->num_levels; level++) {
98 if (level <= split_levels) {
99 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
100 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
101 } else {
102 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
105 return 0;
108 if (of_property_read_bool(np, "cache-size"))
109 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
110 if (of_property_read_bool(np, "i-cache-size"))
111 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
112 if (of_property_read_bool(np, "d-cache-size"))
113 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
115 prev = np;
116 while ((np = of_find_next_cache_node(np))) {
117 of_node_put(prev);
118 prev = np;
119 if (!of_device_is_compatible(np, "cache"))
120 break;
121 if (of_property_read_u32(np, "cache-level", &level))
122 break;
123 if (level <= levels)
124 break;
125 if (of_property_read_bool(np, "cache-size"))
126 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
127 if (of_property_read_bool(np, "i-cache-size"))
128 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
129 if (of_property_read_bool(np, "d-cache-size"))
130 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
131 levels = level;
133 of_node_put(np);
135 return 0;