// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic reconfiguration memory support
 *
 * Copyright 2017 IBM Corporation
 */

#define pr_fmt(fmt) "drmem: " fmt

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/drmem.h>

static int n_root_addr_cells, n_root_size_cells;

static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;

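/*
 * drmem_info caches the LMB size and the array of LMBs parsed from the
 * ibm,dynamic-reconfiguration-memory device tree node; the cached LMBs
 * are iterated with for_each_drmem_lmb().
 */
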
u64 drmem_lmb_memory_max(void)
{
	struct drmem_lmb *last_lmb;

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	return last_lmb->base_addr + drmem_lmb_size();
}

static u32 drmem_lmb_flags(struct drmem_lmb *lmb)
{
	/*
	 * Return the value of the lmb flags field minus the reserved
	 * bit used internally for hotplug processing.
	 */
	return lmb->flags & ~DRMEM_LMB_RESERVED;
}

static struct property *clone_property(struct property *prop, u32 prop_sz)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_sz, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
		return NULL;
	}

	new_prop->length = prop_sz;
#if defined(CONFIG_OF_DYNAMIC)
	of_property_set_flag(new_prop, OF_DYNAMIC);
#endif
	return new_prop;
}

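/*
 * Rebuild the ibm,dynamic-memory (v1) property from the cached LMBs: a
 * cell holding the LMB count, followed by one of_drconf_cell_v1 per LMB,
 * with the internal DRMEM_LMB_RESERVED bit cleared from the flags.
 */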
static int drmem_update_dt_v1(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v1 *dr_cell;
	struct drmem_lmb *lmb;
	u32 *p;

	new_prop = clone_property(prop, prop->length);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(drmem_info->n_lmbs);

	dr_cell = (struct of_drconf_cell_v1 *)p;

	for_each_drmem_lmb(lmb) {
		dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
		dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
		dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
		dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));

		dr_cell++;
	}

	of_update_property(memory, new_prop);
	return 0;
}

static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				struct drmem_lmb *lmb)
{
	dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
	dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
	dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
	dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
}

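/*
 * The ibm,dynamic-memory-v2 property is a compressed form of the LMB
 * list: a run of consecutive LMBs sharing the same aa_index and flags is
 * emitted as a single of_drconf_cell_v2 whose seq_lmbs is the run length.
 */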
static int drmem_update_dt_v2(struct device_node *memory,
			      struct property *prop)
{
	struct property *new_prop;
	struct of_drconf_cell_v2 *dr_cell;
	struct drmem_lmb *lmb, *prev_lmb;
	u32 lmb_sets, prop_sz, seq_lmbs;
	u32 *p;

	/* First pass, determine how many LMB sets are needed. */
	lmb_sets = 0;
	prev_lmb = NULL;
	for_each_drmem_lmb(lmb) {
		if (!prev_lmb) {
			prev_lmb = lmb;
			lmb_sets++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
			lmb_sets++;

		prev_lmb = lmb;
	}

	prop_sz = lmb_sets * sizeof(*dr_cell) + sizeof(__be32);
	new_prop = clone_property(prop, prop_sz);
	if (!new_prop)
		return -1;

	p = new_prop->value;
	*p++ = cpu_to_be32(lmb_sets);

	dr_cell = (struct of_drconf_cell_v2 *)p;

	/* Second pass, populate the LMB set data */
	prev_lmb = NULL;
	seq_lmbs = 0;
	for_each_drmem_lmb(lmb) {
		if (prev_lmb == NULL) {
			/* Start of first LMB set */
			prev_lmb = lmb;
			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs++;
			continue;
		}

		if (prev_lmb->aa_index != lmb->aa_index ||
		    drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
			/* end of one set, start of another */
			dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
			dr_cell++;

			init_drconf_v2_cell(dr_cell, lmb);
			seq_lmbs = 0;
		}

		prev_lmb = lmb;
		seq_lmbs++;
	}

	/* close out last LMB set */
	dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
	of_update_property(memory, new_prop);
	return 0;
}

int drmem_update_dt(void)
{
	struct device_node *memory;
	struct property *prop;
	int rc = -1;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
	if (prop) {
		rc = drmem_update_dt_v1(memory, prop);
	} else {
		prop = of_find_property(memory, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			rc = drmem_update_dt_v2(memory, prop);
	}

	of_node_put(memory);
	return rc;
}

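/*
 * A v1 cell is decoded as: the base address (n_root_addr_cells cells),
 * the drc_index, a reserved cell that is skipped, the aa_index and the
 * flags (one cell each).
 */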
static void read_drconf_v1_cell(struct drmem_lmb *lmb,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	lmb->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	lmb->drc_index = of_read_number(p++, 1);

	p++; /* skip reserved field */

	lmb->aa_index = of_read_number(p++, 1);
	lmb->flags = of_read_number(p++, 1);

	*prop = p;
}

static int
__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct drmem_lmb lmb;
	u32 i, n_lmbs;
	int ret = 0;

	n_lmbs = of_read_number(prop++, 1);
	for (i = 0; i < n_lmbs; i++) {
		read_drconf_v1_cell(&lmb, &prop);
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}

	return ret;
}

static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
				const __be32 **prop)
{
	const __be32 *p = *prop;

	dr_cell->seq_lmbs = of_read_number(p++, 1);
	dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
	p += n_root_addr_cells;
	dr_cell->drc_index = of_read_number(p++, 1);
	dr_cell->aa_index = of_read_number(p++, 1);
	dr_cell->flags = of_read_number(p++, 1);

	*prop = p;
}

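/*
 * A v2 cell describes seq_lmbs consecutive LMBs; walkers expand the set
 * one LMB at a time, stepping the base address by the LMB size and
 * incrementing the drc_index for each LMB.
 */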
static int
__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
		     int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	struct of_drconf_cell_v2 dr_cell;
	struct drmem_lmb lmb;
	u32 i, j, lmb_sets;
	int ret = 0;

	lmb_sets = of_read_number(prop++, 1);
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &prop);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb.base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_lmb_size();

			lmb.drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb.aa_index = dr_cell.aa_index;
			lmb.flags = dr_cell.flags;

			ret = func(&lmb, &usm, data);
			if (ret)
				break;
		}
	}

	return ret;
}

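/*
 * walk_drmem_lmbs_early() parses the flattened device tree via
 * of_get_flat_dt_prop() for use during early boot; walk_drmem_lmbs()
 * further below provides the same iteration over the unflattened tree.
 */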
#ifdef CONFIG_PPC_PSERIES
int __init walk_drmem_lmbs_early(unsigned long node, void *data,
		int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int len, ret = -ENODEV;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < dt_root_size_cells * sizeof(__be32))
		return ret;

	/* Get the address & size cells */
	n_root_addr_cells = dt_root_addr_cells;
	n_root_size_cells = dt_root_size_cells;

	drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);

	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
					   &len);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	memblock_dump_all();
	return ret;
}
#endif

static int init_drmem_lmb_size(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	if (drmem_info->lmb_size)
		return 0;

	prop = of_get_property(dn, "ibm,lmb-size", &len);
	if (!prop || len < n_root_size_cells * sizeof(__be32)) {
		pr_info("Could not determine LMB size\n");
		return -1;
	}

	drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
	return 0;
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *dn)
{
	const __be32 *prop;
	int len;

	prop = of_get_property(dn, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;

	return prop;
}

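/*
 * Example (hypothetical): a minimal callback matching the func signature
 * taken by walk_drmem_lmbs() below; count_drmem_lmbs() is illustrative
 * only and is not defined elsewhere in this file.
 *
 *	static int count_drmem_lmbs(struct drmem_lmb *lmb,
 *				    const __be32 **usm, void *data)
 *	{
 *		(*(u32 *)data)++;
 *		return 0;
 *	}
 *
 *	u32 count = 0;
 *	walk_drmem_lmbs(dn, &count, count_drmem_lmbs);
 */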
int walk_drmem_lmbs(struct device_node *dn, void *data,
		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
	const __be32 *prop, *usm;
	int ret = -ENODEV;

	if (!of_root)
		return ret;

	/* Get the address & size cells */
	of_node_get(of_root);
	n_root_addr_cells = of_n_addr_cells(of_root);
	n_root_size_cells = of_n_size_cells(of_root);
	of_node_put(of_root);

	if (init_drmem_lmb_size(dn))
		return ret;

	usm = of_get_usable_memory(dn);

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
	}

	return ret;
}

static void __init init_drmem_v1_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;

	drmem_info->n_lmbs = of_read_number(prop++, 1);
	if (drmem_info->n_lmbs == 0)
		return;

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	for_each_drmem_lmb(lmb)
		read_drconf_v1_cell(lmb, &prop);
}

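/*
 * The v2 property is read in two passes: the first pass sums seq_lmbs
 * across all sets so the LMB array can be allocated in one kcalloc(),
 * and the second pass fills in the individual LMB entries.
 */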
static void __init init_drmem_v2_lmbs(const __be32 *prop)
{
	struct drmem_lmb *lmb;
	struct of_drconf_cell_v2 dr_cell;
	const __be32 *p;
	u32 i, j, lmb_sets;
	int lmb_index;

	lmb_sets = of_read_number(prop++, 1);
	if (lmb_sets == 0)
		return;

	/* first pass, calculate the number of LMBs */
	p = prop;
	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);
		drmem_info->n_lmbs += dr_cell.seq_lmbs;
	}

	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
				   GFP_KERNEL);
	if (!drmem_info->lmbs)
		return;

	/* second pass, read in the LMB information */
	lmb_index = 0;
	p = prop;

	for (i = 0; i < lmb_sets; i++) {
		read_drconf_v2_cell(&dr_cell, &p);

		for (j = 0; j < dr_cell.seq_lmbs; j++) {
			lmb = &drmem_info->lmbs[lmb_index++];

			lmb->base_addr = dr_cell.base_addr;
			dr_cell.base_addr += drmem_info->lmb_size;

			lmb->drc_index = dr_cell.drc_index;
			dr_cell.drc_index++;

			lmb->aa_index = dr_cell.aa_index;
			lmb->flags = dr_cell.flags;
		}
	}
}

static int __init drmem_init(void)
{
	struct device_node *dn;
	const __be32 *prop;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		pr_info("No dynamic reconfiguration memory found\n");
		return 0;
	}

	if (init_drmem_lmb_size(dn)) {
		of_node_put(dn);
		return 0;
	}

	prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
	if (prop) {
		init_drmem_v1_lmbs(prop);
	} else {
		prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
		if (prop)
			init_drmem_v2_lmbs(prop);
	}

	of_node_put(dn);
	return 0;
}
late_initcall(drmem_init);