/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "dlpar: " fmt
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
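/*
 * Work area handed to the ibm,configure-connector RTAS call.  Firmware
 * fills in offsets (relative to the start of the work area) to the name
 * and property data for the node currently being reported.  The field
 * layout below is reconstructed from how the parsing code uses it;
 * consult the PAPR specification for the authoritative definition.
 */
struct cc_workarea {
        __be32  drc_index;
        __be32  zero;
        __be32  name_offset;
        __be32  prop_length;
        __be32  prop_offset;
};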
void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        prop->name = kstrdup(name, GFP_KERNEL);

        prop->length = be32_to_cpu(ccwa->prop_length);
        value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}
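/*
 * Build an OF_DYNAMIC device_node for the node currently described in
 * the configure-connector work area, naming it <path>/<name>.
 */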
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
                                               const char *path)
{
        struct device_node *dn;
        char *name;

        /* If parent node path is "/" advance path to NULL terminator to
         * prevent double leading slashes in full_name.
         */
        if (!path[1])
                path++;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);
        of_node_init(dn);

        return dn;
}
static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}
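/*
 * Recursively free a tree of nodes produced by dlpar_configure_connector(),
 * including all children and siblings.
 */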
void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}
#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003
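/*
 * Fetch the device tree fragment for a DRC index by calling the
 * ibm,configure-connector RTAS method repeatedly.  Each return code above
 * describes what the work area now holds (a sibling node, a child node, a
 * property, or a step back up to the parent); the loop below stitches the
 * pieces into a detached device_node tree rooted at first_dn, which the
 * caller can later attach with dlpar_attach_node().
 */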
struct device_node *dlpar_configure_connector(__be32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        const char *parent_path = parent->full_name;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;

        do {
                /* Since we release the rtas_data_buf lock between configure
                 * connector calls we want to re-populate the rtas_data_buffer
                 * with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        if (first_dn)
                                parent_path = last_dn->full_name;

                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        parent_path = last_dn->parent->full_name;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}
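/*
 * Look up the parent device_node for a full path by stripping the last
 * path component, e.g. a (hypothetical) "/cpus/PowerPC,POWER7@10" resolves
 * to the "/cpus" node.
 */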
static struct device_node *derive_parent(const char *path)
{
        struct device_node *parent;
        char *last_slash;

        last_slash = strrchr(path, '/');
        if (last_slash == path) {
                parent = of_find_node_by_path("/");
        } else {
                char *parent_path;
                int parent_path_len = last_slash - path + 1;

                parent_path = kmalloc(parent_path_len, GFP_KERNEL);
                if (!parent_path)
                        return NULL;

                strlcpy(parent_path, path, parent_path_len);
                parent = of_find_node_by_path(parent_path);
                kfree(parent_path);
        }

        return parent;
}
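/*
 * Attach a configured device_node under its parent in the live device
 * tree.  dlpar_detach_node() below performs the inverse, detaching a node
 * after first detaching all of its children.
 */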
int dlpar_attach_node(struct device_node *dn)
{
        int rc;

        dn->parent = derive_parent(dn->full_name);
        if (!dn->parent)
                return -ENOMEM;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %s\n",
                       dn->full_name);
                return rc;
        }

        of_node_put(dn->parent);
        return 0;
}
int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn); /* Must decrement the refcount */
        return 0;
}
#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1
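/*
 * Acquiring a DRC: check via get-sensor-state that the connector is
 * currently unusable, mark its allocation state usable, then un-isolate
 * it, rolling the allocation state back if un-isolation fails.
 * dlpar_release_drc() below performs the inverse sequence.
 */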
int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}
int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
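/*
 * Bring every hardware thread listed in the node's
 * "ibm,ppc-interrupt-server#s" property online through the generic CPU
 * hotplug core.
 */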
static int dlpar_online_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const __be32 *intserv;
        u32 thread;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                thread = be32_to_cpu(intserv[i]);
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != thread)
                                continue;
                        BUG_ON(get_cpu_current_state(cpu)
                                        != CPU_STATE_OFFLINE);
                        cpu_maps_update_done();
                        rc = device_online(get_cpu_device(cpu));
                        if (rc)
                                goto out;
                        cpu_maps_update_begin();

                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to online "
                               "with physical id 0x%x\n", thread);
        }
        cpu_maps_update_done();

out:
        return rc;
}
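/*
 * ppc_md.cpu_probe handler: given a DRC index written to the cpu probe
 * sysfs interface (normally /sys/devices/system/cpu/probe), acquire the
 * connector, fetch its device tree fragment from firmware, attach it,
 * and online the new threads.
 */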
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
        struct device_node *dn, *parent;
        u32 drc_index;
        int rc;

        rc = kstrtou32(buf, 0, &drc_index);
        if (rc)
                return -EINVAL;

        rc = dlpar_acquire_drc(drc_index);
        if (rc)
                return -EINVAL;

        parent = of_find_node_by_path("/cpus");
        if (!parent) {
                dlpar_release_drc(drc_index);
                return -ENODEV;
        }

        dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
        of_node_put(parent);
        if (!dn) {
                dlpar_release_drc(drc_index);
                return -EINVAL;
        }

        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
                return rc;
        }

        rc = dlpar_online_cpu(dn);
        if (rc)
                return rc;

        return count;
}
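/*
 * Take every thread of the CPU node offline.  Threads that are already
 * offline are skipped; online threads go through device_offline(), and
 * threads parked in CPU_STATE_INACTIVE are prodded with H_PROD so they
 * can move to CPU_STATE_OFFLINE and be torn down.
 */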
static int dlpar_offline_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const __be32 *intserv;
        u32 thread;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                thread = be32_to_cpu(intserv[i]);
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != thread)
                                continue;

                        if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
                                break;

                        if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
                                set_preferred_offline_state(cpu,
                                                            CPU_STATE_OFFLINE);
                                cpu_maps_update_done();
                                rc = device_offline(get_cpu_device(cpu));
                                if (rc)
                                        goto out;
                                cpu_maps_update_begin();
                                break;
                        }

                        /*
                         * The cpu is in CPU_STATE_INACTIVE.
                         * Upgrade its state to CPU_STATE_OFFLINE.
                         */
                        set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                        BUG_ON(plpar_hcall_norets(H_PROD, thread)
                                                        != H_SUCCESS);
                        __cpu_die(cpu);
                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to offline "
                               "with physical id 0x%x\n", thread);
        }
        cpu_maps_update_done();

out:
        return rc;
}
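/*
 * ppc_md.cpu_release handler: given a device tree path written to the cpu
 * release sysfs interface, offline the threads, release the DRC back to
 * firmware, and detach the node.  If detaching fails the DRC is
 * re-acquired so the CPU is not left in limbo.
 */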
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
        struct device_node *dn;
        u32 drc_index;
        int rc;

        dn = of_find_node_by_path(buf);
        if (!dn)
                return -EINVAL;

        rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
        if (rc) {
                of_node_put(dn);
                return -EINVAL;
        }

        rc = dlpar_offline_cpu(dn);
        if (rc) {
                of_node_put(dn);
                return -EINVAL;
        }

        rc = dlpar_release_drc(drc_index);
        if (rc) {
                of_node_put(dn);
                return rc;
        }

        rc = dlpar_detach_node(dn);
        if (rc) {
                dlpar_acquire_drc(drc_index);
                return rc;
        }

        of_node_put(dn);

        return count;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
        int rc;

        /* pseries error logs are in BE format, convert to cpu type */
        switch (hp_elog->id_type) {
        case PSERIES_HP_ELOG_ID_DRC_COUNT:
                hp_elog->_drc_u.drc_count =
                                be32_to_cpu(hp_elog->_drc_u.drc_count);
                break;
        case PSERIES_HP_ELOG_ID_DRC_INDEX:
                hp_elog->_drc_u.drc_index =
                                be32_to_cpu(hp_elog->_drc_u.drc_index);
        }

        switch (hp_elog->resource) {
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
                rc = -EINVAL;
        }

        return rc;
}
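/*
 * sysfs store handler for /sys/kernel/dlpar.  Requests are plain text in
 * the form "<resource> <action> <id_type> <id>"; only memory requests are
 * handled here.  For example (illustrative values only):
 *
 *   echo "memory add count 1" > /sys/kernel/dlpar
 *   echo "memory remove index 0x80000010" > /sys/kernel/dlpar
 *
 * The parsed request is packed into a pseries_hp_errorlog and passed to
 * handle_dlpar_errorlog() above.
 */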
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
                           const char *buf, size_t count)
{
        struct pseries_hp_errorlog *hp_elog;
        const char *arg;
        int rc;

        hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
        if (!hp_elog) {
                rc = -ENOMEM;
                goto dlpar_store_out;
        }

        /* Parse out the request from the user, this will be in the form
         * <resource> <action> <id_type> <id>
         */
        arg = buf;
        if (!strncmp(arg, "memory", 6)) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
                arg += strlen("memory ");
        } else {
                pr_err("Invalid resource specified: \"%s\"\n", buf);
                rc = -EINVAL;
                goto dlpar_store_out;
        }

        if (!strncmp(arg, "add", 3)) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
                arg += strlen("add ");
        } else if (!strncmp(arg, "remove", 6)) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
                arg += strlen("remove ");
        } else {
                pr_err("Invalid action specified: \"%s\"\n", buf);
                rc = -EINVAL;
                goto dlpar_store_out;
        }

        if (!strncmp(arg, "index", 5)) {
                u32 index;

                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
                arg += strlen("index ");
                if (kstrtou32(arg, 0, &index)) {
                        rc = -EINVAL;
                        pr_err("Invalid drc_index specified: \"%s\"\n", buf);
                        goto dlpar_store_out;
                }

                hp_elog->_drc_u.drc_index = cpu_to_be32(index);
        } else if (!strncmp(arg, "count", 5)) {
                u32 count;

                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
                arg += strlen("count ");
                if (kstrtou32(arg, 0, &count)) {
                        rc = -EINVAL;
                        pr_err("Invalid count specified: \"%s\"\n", buf);
                        goto dlpar_store_out;
                }

                hp_elog->_drc_u.drc_count = cpu_to_be32(count);
        } else {
                pr_err("Invalid id_type specified: \"%s\"\n", buf);
                rc = -EINVAL;
                goto dlpar_store_out;
        }

        rc = handle_dlpar_errorlog(hp_elog);

dlpar_store_out:
        kfree(hp_elog);
        return rc ? rc : count;
}
static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
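/*
 * Wire up the CPU probe/release hooks (when configured) and create the
 * /sys/kernel/dlpar interface at boot.
 */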
static int __init pseries_dlpar_init(void)
{
        int rc;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        ppc_md.cpu_probe = dlpar_cpu_probe;
        ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

        rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);

        return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);