arch/powerpc/platforms/powernv/opal-imc.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OPAL IMC interface detection driver
 * Supported on POWERNV platform
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/crash_dump.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/debugfs.h>

static struct dentry *imc_debugfs_parent;

/* Helpers to export imc command and mode via debugfs */
static int imc_mem_get(void *data, u64 *val)
{
	*val = cpu_to_be64(*(u64 *)data);
	return 0;
}

static int imc_mem_set(void *data, u64 val)
{
	*(u64 *)data = cpu_to_be64(val);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");
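
/*
 * Create a debugfs file that exposes one 64-bit control block word through
 * the byte-swapping accessors above.
 */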
static void imc_debugfs_create_x64(const char *name, umode_t mode,
				   struct dentry *parent, u64 *value)
{
	debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}

/*
 * export_imc_mode_and_cmd: Create a debugfs interface
 *                          for imc_cmd and imc_mode
 *                          for each node in the system.
 *                          imc_mode and imc_cmd can be changed by echo into
 *                          this interface.
 */
static void export_imc_mode_and_cmd(struct device_node *node,
				    struct imc_pmu *pmu_ptr)
{
	static u64 loc, *imc_mode_addr, *imc_cmd_addr;
	char mode[16], cmd[16];
	u32 cb_offset;
	struct imc_mem_info *ptr = pmu_ptr->mem_info;

	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);

	if (of_property_read_u32(node, "cb_offset", &cb_offset))
		cb_offset = IMC_CNTL_BLK_OFFSET;

	while (ptr->vbase != NULL) {
		loc = (u64)(ptr->vbase) + cb_offset;
		imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
		sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
				       imc_mode_addr);

		imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
		sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
		imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
				       imc_cmd_addr);
		ptr++;
	}
}

/*
 * imc_get_mem_addr_nest: Function to get nest counter memory region
 * for each chip
 */
static int imc_get_mem_addr_nest(struct device_node *node,
				 struct imc_pmu *pmu_ptr,
				 u32 offset)
{
	int nr_chips = 0, i;
	u64 *base_addr_arr, baddr;
	u32 *chipid_arr;

	nr_chips = of_property_count_u32_elems(node, "chip-id");
	if (nr_chips <= 0)
		return -ENODEV;

	base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL);
	if (!base_addr_arr)
		return -ENOMEM;

	chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL);
	if (!chipid_arr) {
		kfree(base_addr_arr);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
		goto error;

	if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
								nr_chips))
		goto error;
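
	/*
	 * Allocate one extra, zeroed entry so the array ends with a NULL
	 * vbase; callers such as export_imc_mode_and_cmd() walk the array
	 * until they hit that sentinel.
	 */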
	pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
				    GFP_KERNEL);
	if (!pmu_ptr->mem_info)
		goto error;

	for (i = 0; i < nr_chips; i++) {
		pmu_ptr->mem_info[i].id = chipid_arr[i];
		baddr = base_addr_arr[i] + offset;
		pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
	}

	pmu_ptr->imc_counter_mmaped = true;
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return 0;

error:
	kfree(base_addr_arr);
	kfree(chipid_arr);
	return -1;
}

/*
 * imc_pmu_create : Takes the parent device which is the pmu unit, pmu_index
 *                  and domain as the inputs.
 * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
 */
static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
{
	int ret = 0;
	struct imc_pmu *pmu_ptr;
	u32 offset;

	/* Return for unknown domain */
	if (domain < 0)
		return NULL;

	/* memory for pmu */
	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
	if (!pmu_ptr)
		return NULL;

	/* Set the domain */
	pmu_ptr->domain = domain;

	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
	if (ret)
		goto free_pmu;

	if (!of_property_read_u32(parent, "offset", &offset)) {
		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
			goto free_pmu;
	}

	/* Function to register IMC pmu */
	ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
	if (ret) {
		pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
		kfree(pmu_ptr->pmu.name);
		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
			kfree(pmu_ptr->mem_info);
		kfree(pmu_ptr);
		return NULL;
	}

	return pmu_ptr;

free_pmu:
	kfree(pmu_ptr);
	return NULL;
}
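
/*
 * Stop the nest IMC engine via OPAL on one online CPU per NUMA node;
 * nest counters are per-chip, so a single stop call per node is enough.
 */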
static void disable_nest_pmu_counters(void)
{
	int nid, cpu;
	const struct cpumask *l_cpumask;

	get_online_cpus();
	for_each_node_with_cpus(nid) {
		l_cpumask = cpumask_of_node(nid);
		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			continue;
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
	}
	put_online_cpus();
}
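
/*
 * Stop the core IMC engine via OPAL on the first online thread of each
 * online core.
 */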
static void disable_core_pmu_counters(void)
{
	cpumask_t cores_map;
	int cpu, rc;

	get_online_cpus();
	/* Disable the IMC Core functions */
	cores_map = cpu_online_cores_map();
	for_each_cpu(cpu, &cores_map) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(cpu));
		if (rc)
			pr_err("%s: Failed to stop Core (cpu = %d)\n",
				__FUNCTION__, cpu);
	}
	put_online_cpus();
}
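
/* Count the nest (chip) IMC units described in the device tree. */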
int get_max_nest_dev(void)
{
	struct device_node *node;
	u32 pmu_units = 0, type;

	for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
		if (of_property_read_u32(node, "type", &type))
			continue;

		if (type == IMC_TYPE_CHIP)
			pmu_units++;
	}

	return pmu_units;
}
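
/*
 * Probe routine: in a kdump kernel just stop the engines; otherwise walk
 * every IMC unit node, map its "type" property to a PMU domain and
 * register a PMU for it.
 */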
static int opal_imc_counters_probe(struct platform_device *pdev)
{
	struct device_node *imc_dev = pdev->dev.of_node;
	struct imc_pmu *pmu;
	int pmu_count = 0, domain;
	bool core_imc_reg = false, thread_imc_reg = false;
	u32 type;

	/*
	 * Check whether this is kdump kernel. If yes, force the engines to
	 * stop and return.
	 */
	if (is_kdump_kernel()) {
		disable_nest_pmu_counters();
		disable_core_pmu_counters();
		return -ENODEV;
	}

	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
		pmu = NULL;
		if (of_property_read_u32(imc_dev, "type", &type)) {
			pr_warn("IMC Device without type property\n");
			continue;
		}

		switch (type) {
		case IMC_TYPE_CHIP:
			domain = IMC_DOMAIN_NEST;
			break;
		case IMC_TYPE_CORE:
			domain = IMC_DOMAIN_CORE;
			break;
		case IMC_TYPE_THREAD:
			domain = IMC_DOMAIN_THREAD;
			break;
		case IMC_TYPE_TRACE:
			domain = IMC_DOMAIN_TRACE;
			break;
		default:
			pr_warn("IMC Unknown Device type\n");
			domain = -1;
			break;
		}

		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
		if (pmu != NULL) {
			if (domain == IMC_DOMAIN_NEST) {
				if (!imc_debugfs_parent)
					export_imc_mode_and_cmd(imc_dev, pmu);
				pmu_count++;
			}
			if (domain == IMC_DOMAIN_CORE)
				core_imc_reg = true;
			if (domain == IMC_DOMAIN_THREAD)
				thread_imc_reg = true;
		}
	}

	/* If core imc is not registered, unregister thread-imc */
	if (!core_imc_reg && thread_imc_reg)
		unregister_thread_imc();

	return 0;
}

static void opal_imc_counters_shutdown(struct platform_device *pdev)
{
	/*
	 * This only stops the engines, which is the bare minimum.
	 * TODO: Need to handle proper memory cleanup and pmu
	 * unregister.
	 */
	disable_nest_pmu_counters();
	disable_core_pmu_counters();
}

static const struct of_device_id opal_imc_match[] = {
	{ .compatible = IMC_DTB_COMPAT },
	{},
};

static struct platform_driver opal_imc_driver = {
	.driver = {
		.name = "opal-imc-counters",
		.of_match_table = opal_imc_match,
	},
	.probe = opal_imc_counters_probe,
	.shutdown = opal_imc_counters_shutdown,
};

builtin_platform_driver(opal_imc_driver);