arch/powerpc/platforms/pseries/hotplug-memory.c
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include "pseries.h"

static bool rtas_hp_event;
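
/*
 * Return the memory block size used by this platform: prefer the
 * "ibm,lmb-size" property of the dynamic-reconfiguration-memory node,
 * otherwise fall back to probing the memory@... nodes.
 */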
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}

	return memblock_size;
}
static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}
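
/*
 * Clone the "ibm,dynamic-memory" property into a working copy whose
 * of_drconf_cell entries are converted to CPU endianness, so callers can
 * inspect and modify LMB entries directly.
 */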
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}

static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}
static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}
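
/*
 * Find the index of the associativity array that matches this LMB's
 * "ibm,associativity" values, appending a new array to the
 * ibm,associativity-lookup-arrays property if no match exists.
 */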
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}
static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}
static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}
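
/* Online or offline the memory_block device backing an LMB. */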
static int dlpar_change_lmb_state(struct of_drconf_cell *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct of_drconf_cell *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct of_drconf_cell *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}
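
/*
 * An LMB can be hot-removed only if it is assigned, does not overlap the
 * fadump boot memory area, and every present memory section it covers is
 * removable.
 */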
static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/* Don't hot-remove memory that falls in fadump boot memory area */
	if (is_fadump_boot_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);
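
/* Offline an LMB, remove its memory, and drop it from the device tree. */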
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}
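
/*
 * Remove lmbs_to_remove LMBs, scanning the whole LMB table for removable
 * candidates.  If the requested count cannot be removed, add the already
 * removed LMBs back and fail the request.
 */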
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmb_is_removable(&lmbs[i]))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc)
				dlpar_release_drc(lmbs[i].drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmbs[i].base_addr);

	return rc;
}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_remove;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			break;

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));

			lmbs[i].reserved = 0;
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmbs[i].base_addr, lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}
	}

	return rc;
}
#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
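
/*
 * Add a single LMB: update the device tree, add the memory and online it,
 * rolling back if any step fails.
 */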
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		remove_memory(nid, lmb->base_addr, block_sz);
		dlpar_remove_device_tree_lmb(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			continue;
		}

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}
static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmbs[i].drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index,
				  struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_add;

	/* Validate that the LMBs in this range are not reserved */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			break;
		}

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}
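
/*
 * Entry point for memory DLPAR requests: dispatch hot-add, hot-remove and
 * re-add operations by count, drc index, or indexed count, based on the
 * hotplug error log.
 */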
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index, prop);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}
static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}
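
/*
 * Handle an update of the "ibm,dynamic-memory" property that did not come
 * from the RTAS hotplug path (rtas_hp_event is false), adding or removing
 * memblock regions whose assignment changed.
 */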
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			      DRCONF_MEM_ASSIGNED)) &&
			   (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}

	return rc;
}
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);