Merge tag 'linux-kselftest-kunit-fixes-5.11-rc3' of git://git.kernel.org/pub/scm...
[linux/fpc-iii.git] / arch / powerpc / mm / drmem.c
blob9af3832c9d8dc28ae91d7df7beddf81b1b12f1d5
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Dynamic reconfiguration memory support
5 * Copyright 2017 IBM Corporation
6 */
8 #define pr_fmt(fmt) "drmem: " fmt
10 #include <linux/kernel.h>
11 #include <linux/of.h>
12 #include <linux/of_fdt.h>
13 #include <linux/memblock.h>
14 #include <asm/prom.h>
15 #include <asm/drmem.h>
17 static int n_root_addr_cells, n_root_size_cells;
19 static struct drmem_lmb_info __drmem_info;
20 struct drmem_lmb_info *drmem_info = &__drmem_info;
22 u64 drmem_lmb_memory_max(void)
24 struct drmem_lmb *last_lmb;
26 last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
27 return last_lmb->base_addr + drmem_lmb_size();
30 static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
33 * Return the value of the lmb flags field minus the reserved
34 * bit used internally for hotplug processing.
36 return lmb->flags & ~DRMEM_LMB_RESERVED;
39 static struct property *clone_property(struct property *prop, u32 prop_sz)
41 struct property *new_prop;
43 new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
44 if (!new_prop)
45 return NULL;
47 new_prop->name = kstrdup(prop->name, GFP_KERNEL);
48 new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
49 if (!new_prop->name || !new_prop->value) {
50 kfree(new_prop->name);
51 kfree(new_prop->value);
52 kfree(new_prop);
53 return NULL;
56 new_prop->length = prop_sz;
57 #if defined(CONFIG_OF_DYNAMIC)
58 of_property_set_flag(new_prop, OF_DYNAMIC);
59 #endif
60 return new_prop;
63 static int drmem_update_dt_v1(struct device_node *memory,
64 struct property *prop)
66 struct property *new_prop;
67 struct of_drconf_cell_v1 *dr_cell;
68 struct drmem_lmb *lmb;
69 u32 *p;
71 new_prop = clone_property(prop, prop->length);
72 if (!new_prop)
73 return -1;
75 p = new_prop->value;
76 *p++ = cpu_to_be32(drmem_info->n_lmbs);
78 dr_cell = (struct of_drconf_cell_v1 *)p;
80 for_each_drmem_lmb(lmb) {
81 dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
82 dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
83 dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
84 dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
86 dr_cell++;
89 of_update_property(memory, new_prop);
90 return 0;
93 static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
94 struct drmem_lmb *lmb)
96 dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
97 dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
98 dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
99 dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
102 static int drmem_update_dt_v2(struct device_node *memory,
103 struct property *prop)
105 struct property *new_prop;
106 struct of_drconf_cell_v2 *dr_cell;
107 struct drmem_lmb *lmb, *prev_lmb;
108 u32 lmb_sets, prop_sz, seq_lmbs;
109 u32 *p;
111 /* First pass, determine how many LMB sets are needed. */
112 lmb_sets = 0;
113 prev_lmb = NULL;
114 for_each_drmem_lmb(lmb) {
115 if (!prev_lmb) {
116 prev_lmb = lmb;
117 lmb_sets++;
118 continue;
121 if (prev_lmb->aa_index != lmb->aa_index ||
122 drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
123 lmb_sets++;
125 prev_lmb = lmb;
128 prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
129 new_prop = clone_property(prop, prop_sz);
130 if (!new_prop)
131 return -1;
133 p = new_prop->value;
134 *p++ = cpu_to_be32(lmb_sets);
136 dr_cell = (struct of_drconf_cell_v2 *)p;
138 /* Second pass, populate the LMB set data */
139 prev_lmb = NULL;
140 seq_lmbs = 0;
141 for_each_drmem_lmb(lmb) {
142 if (prev_lmb == NULL) {
143 /* Start of first LMB set */
144 prev_lmb = lmb;
145 init_drconf_v2_cell(dr_cell, lmb);
146 seq_lmbs++;
147 continue;
150 if (prev_lmb->aa_index != lmb->aa_index ||
151 drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
152 /* end of one set, start of another */
153 dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
154 dr_cell++;
156 init_drconf_v2_cell(dr_cell, lmb);
157 seq_lmbs = 1;
158 } else {
159 seq_lmbs++;
162 prev_lmb = lmb;
165 /* close out last LMB set */
166 dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
167 of_update_property(memory, new_prop);
168 return 0;
171 int drmem_update_dt(void)
173 struct device_node *memory;
174 struct property *prop;
175 int rc = -1;
177 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
178 if (!memory)
179 return -1;
181 prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
182 if (prop) {
183 rc = drmem_update_dt_v1(memory, prop);
184 } else {
185 prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
186 if (prop)
187 rc = drmem_update_dt_v2(memory, prop);
190 of_node_put(memory);
191 return rc;
194 static void read_drconf_v1_cell(struct drmem_lmb *lmb,
195 const __be32 **prop)
197 const __be32 *p = *prop;
199 lmb->base_addr = of_read_number(p, n_root_addr_cells);
200 p += n_root_addr_cells;
201 lmb->drc_index = of_read_number(p++, 1);
203 p++; /* skip reserved field */
205 lmb->aa_index = of_read_number(p++, 1);
206 lmb->flags = of_read_number(p++, 1);
208 *prop = p;
211 static int
212 __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
213 int (*func)(struct drmem_lmb *, const __be32 **, void *))
215 struct drmem_lmb lmb;
216 u32 i, n_lmbs;
217 int ret = 0;
219 n_lmbs = of_read_number(prop++, 1);
220 for (i = 0; i < n_lmbs; i++) {
221 read_drconf_v1_cell(&lmb, &prop);
222 ret = func(&lmb, &usm, data);
223 if (ret)
224 break;
227 return ret;
230 static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
231 const __be32 **prop)
233 const __be32 *p = *prop;
235 dr_cell->seq_lmbs = of_read_number(p++, 1);
236 dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
237 p += n_root_addr_cells;
238 dr_cell->drc_index = of_read_number(p++, 1);
239 dr_cell->aa_index = of_read_number(p++, 1);
240 dr_cell->flags = of_read_number(p++, 1);
242 *prop = p;
245 static int
246 __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
247 int (*func)(struct drmem_lmb *, const __be32 **, void *))
249 struct of_drconf_cell_v2 dr_cell;
250 struct drmem_lmb lmb;
251 u32 i, j, lmb_sets;
252 int ret = 0;
254 lmb_sets = of_read_number(prop++, 1);
255 for (i = 0; i < lmb_sets; i++) {
256 read_drconf_v2_cell(&dr_cell, &prop);
258 for (j = 0; j < dr_cell.seq_lmbs; j++) {
259 lmb.base_addr = dr_cell.base_addr;
260 dr_cell.base_addr += drmem_lmb_size();
262 lmb.drc_index = dr_cell.drc_index;
263 dr_cell.drc_index++;
265 lmb.aa_index = dr_cell.aa_index;
266 lmb.flags = dr_cell.flags;
268 ret = func(&lmb, &usm, data);
269 if (ret)
270 break;
274 return ret;
#ifdef CONFIG_PPC_PSERIES
/*
 * Flattened-device-tree variant of the LMB walk, for use during early
 * boot before the unflattened tree exists.  Caches the root
 * address/size cell counts, records the LMB size, then walks whichever
 * dynamic-memory property version is present.  Returns -ENODEV if no
 * drconf information is found.
 */
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
		int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	/* Only present in kexec/kdump kernels; NULL otherwise. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}
#endif
312 static int init_drmem_lmb_size(struct device_node *dn)
314 const __be32 *prop;
315 int len;
317 if (drmem_info->lmb_size)
318 return 0;
320 prop = of_get_property(dn, "ibm,lmb-size", &len);
321 if (!prop || len < n_root_size_cells * sizeof(__be32)) {
322 pr_info("Could not determine LMB size\n");
323 return -1;
326 drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
327 return 0;
331 * Returns the property linux,drconf-usable-memory if
332 * it exists (the property exists only in kexec/kdump kernels,
333 * added by kexec-tools)
335 static const __be32 *of_get_usable_memory(struct device_node *dn)
337 const __be32 *prop;
338 u32 len;
340 prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
341 if (!prop || len < sizeof(unsigned int))
342 return NULL;
344 return prop;
347 int walk_drmem_lmbs(struct device_node *dn, void *data,
348 int (*func)(struct drmem_lmb *, const __be32 **, void *))
350 const __be32 *prop, *usm;
351 int ret = -ENODEV;
353 if (!of_root)
354 return ret;
356 /* Get the address & size cells */
357 of_node_get(of_root);
358 n_root_addr_cells = of_n_addr_cells(of_root);
359 n_root_size_cells = of_n_size_cells(of_root);
360 of_node_put(of_root);
362 if (init_drmem_lmb_size(dn))
363 return ret;
365 usm = of_get_usable_memory(dn);
367 prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
368 if (prop) {
369 ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
370 } else {
371 prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
372 if (prop)
373 ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
376 return ret;
379 static void __init init_drmem_v1_lmbs(const __be32 *prop)
381 struct drmem_lmb *lmb;
383 drmem_info->n_lmbs = of_read_number(prop++, 1);
384 if (drmem_info->n_lmbs == 0)
385 return;
387 drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
388 GFP_KERNEL);
389 if (!drmem_info->lmbs)
390 return;
392 for_each_drmem_lmb(lmb)
393 read_drconf_v1_cell(lmb, &prop);
396 static void __init init_drmem_v2_lmbs(const __be32 *prop)
398 struct drmem_lmb *lmb;
399 struct of_drconf_cell_v2 dr_cell;
400 const __be32 *p;
401 u32 i, j, lmb_sets;
402 int lmb_index;
404 lmb_sets = of_read_number(prop++, 1);
405 if (lmb_sets == 0)
406 return;
408 /* first pass, calculate the number of LMBs */
409 p = prop;
410 for (i = 0; i < lmb_sets; i++) {
411 read_drconf_v2_cell(&dr_cell, &p);
412 drmem_info->n_lmbs += dr_cell.seq_lmbs;
415 drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
416 GFP_KERNEL);
417 if (!drmem_info->lmbs)
418 return;
420 /* second pass, read in the LMB information */
421 lmb_index = 0;
422 p = prop;
424 for (i = 0; i < lmb_sets; i++) {
425 read_drconf_v2_cell(&dr_cell, &p);
427 for (j = 0; j < dr_cell.seq_lmbs; j++) {
428 lmb = &drmem_info->lmbs[lmb_index++];
430 lmb->base_addr = dr_cell.base_addr;
431 dr_cell.base_addr += drmem_info->lmb_size;
433 lmb->drc_index = dr_cell.drc_index;
434 dr_cell.drc_index++;
436 lmb->aa_index = dr_cell.aa_index;
437 lmb->flags = dr_cell.flags;
442 static int __init drmem_init(void)
444 struct device_node *dn;
445 const __be32 *prop;
447 dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
448 if (!dn) {
449 pr_info("No dynamic reconfiguration memory found\n");
450 return 0;
453 if (init_drmem_lmb_size(dn)) {
454 of_node_put(dn);
455 return 0;
458 prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
459 if (prop) {
460 init_drmem_v1_lmbs(prop);
461 } else {
462 prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
463 if (prop)
464 init_drmem_v2_lmbs(prop);
467 of_node_put(dn);
468 return 0;
470 late_initcall(drmem_init);