/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "offline_states.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

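/*
 * Work area handed to the ibm,configure-connector RTAS call and parsed by
 * the helpers below.  The struct definition is reconstructed here as a
 * readability aid (a sketch; the exact layout is assumed from the offsets
 * used in this file): drc_index, name_offset, prop_length and prop_offset
 * are the fields actually dereferenced below, all big-endian values
 * returned by firmware.
 */
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};
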
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If parent node path is "/" advance path to NULL terminator to
	 * prevent double leading slashes in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

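/*
 * ibm,configure-connector is called repeatedly and its return code drives
 * the device tree walk below: NEXT_SIBLING and NEXT_CHILD announce another
 * node at the same or the next level, NEXT_PROPERTY announces a property of
 * the current node, PREV_PARENT steps back up one level, CALL_AGAIN asks
 * for another call with the same work area, and COMPLETE (0) means the
 * subtree rooted at the requested DRC index has been fully described.
 * MORE_MEMORY and ERR_CFG_USE are treated as errors here.
 */
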
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buffer
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

static struct device_node *derive_parent(const char *path)
{
	struct device_node *parent;
	char *last_slash;

	last_slash = strrchr(path, '/');
	if (last_slash == path) {
		parent = of_find_node_by_path("/");
	} else {
		char *parent_path;
		int parent_path_len = last_slash - path + 1;

		parent_path = kmalloc(parent_path_len, GFP_KERNEL);
		if (!parent_path)
			return NULL;

		strlcpy(parent_path, path, parent_path_len);
		parent = of_find_node_by_path(parent_path);
		kfree(parent_path);
	}

	return parent;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = derive_parent(dn->full_name);
	if (!dn->parent)
		return -ENOMEM;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);

	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn); /* Must decrement the refcount */

	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

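/*
 * DRC (dynamic reconfiguration connector) state handling, as used by the
 * two helpers below: acquiring a resource first checks that the entity
 * sensor reports DR_ENTITY_UNUSABLE, then marks the allocation state usable
 * and un-isolates the connector; releasing does the reverse (isolate, then
 * mark the allocation unusable).  If the second indicator call fails, the
 * first one is rolled back so the connector is left in a consistent state.
 */
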
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn, *parent;
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_acquire_drc(drc_index);
	if (rc)
		return -EINVAL;

	parent = of_find_node_by_path("/cpus");
	if (!parent)
		return -ENODEV;

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		return rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc)
		return rc;

	return count;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		of_node_put(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(drc_index);
		return rc;
	}

	of_node_put(dn);

	return count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

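/*
 * Note: dlpar_cpu_probe() and dlpar_cpu_release() are not called directly
 * from this file; pseries_dlpar_init() below hooks them into
 * ppc_md.cpu_probe/ppc_md.cpu_release, which the generic cpu driver exposes
 * as sysfs files (typically /sys/devices/system/cpu/probe and .../release
 * when CONFIG_ARCH_CPU_PROBE_RELEASE is set).  For example, writing a drc
 * index such as "0x10000008" (an illustrative value) to the probe file adds
 * the cpu, and writing the cpu's device tree path to the release file
 * removes it.
 */
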
static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

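/*
 * handle_dlpar_errorlog() takes a pseries_hp_errorlog, the same structure
 * firmware uses to describe hotplug events in an RTAS error log, so
 * user-initiated requests built by dlpar_store() below and firmware-driven
 * hotplug requests can share one entry point.  Only memory is handled here;
 * other resources fall through to the error path.
 */
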
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	const char *arg;
	int rc;

	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog) {
		rc = -ENOMEM;
		goto dlpar_store_out;
	}

	/* Parse out the request from the user, this will be in the form
	 * <resource> <action> <id_type> <id>
	 */
	arg = buf;
	if (!strncmp(arg, "memory", 6)) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
		arg += strlen("memory ");
	} else {
		pr_err("Invalid resource specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "add", 3)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
		arg += strlen("add ");
	} else if (!strncmp(arg, "remove", 6)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
		arg += strlen("remove ");
	} else {
		pr_err("Invalid action specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "index", 5)) {
		u32 index;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg += strlen("index ");
		if (kstrtou32(arg, 0, &index)) {
			rc = -EINVAL;
			pr_err("Invalid drc_index specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (!strncmp(arg, "count", 5)) {
		u32 count;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg += strlen("count ");
		if (kstrtou32(arg, 0, &count)) {
			rc = -EINVAL;
			pr_err("Invalid count specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	rc = handle_dlpar_errorlog(hp_elog);

dlpar_store_out:
	kfree(hp_elog);
	return rc ? rc : count;
}

static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);

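/*
 * The dlpar attribute is created under kernel_kobj, i.e. /sys/kernel/dlpar.
 * A request written there follows the "<resource> <action> <id_type> <id>"
 * format parsed above, for example (index value illustrative):
 *
 *	echo "memory add count 1" > /sys/kernel/dlpar
 *	echo "memory remove index 0x80000010" > /sys/kernel/dlpar
 */
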
static int __init pseries_dlpar_init(void)
{
	int rc;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);

	return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);