/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
        struct work_struct work;
        struct pseries_hp_errorlog *errlog;
        struct completion *hp_completion;
        int *rc;
};
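
/*
 * Layout of the ibm,configure-connector work area used by the parsing
 * helpers below. This is a sketch inferred from the fields the code reads
 * (drc_index, name_offset, prop_length, prop_offset); the reserved word
 * after drc_index is an assumption, not something visible in this excerpt.
 */
struct cc_workarea {
        __be32  drc_index;
        __be32  zero;           /* assumed reserved word */
        __be32  name_offset;
        __be32  prop_length;
        __be32  prop_offset;
};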

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        prop->name = kstrdup(name, GFP_KERNEL);

        prop->length = be32_to_cpu(ccwa->prop_length);
        value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}
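
/*
 * Build a device node from the current work area contents: the node name is
 * appended to the parent path to form full_name, and the node is marked
 * OF_DYNAMIC since it was allocated at run time.
 */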

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
                                               const char *path)
{
        struct device_node *dn;
        char *name;

        /* If parent node path is "/" advance path to NULL terminator to
         * prevent double leading slashes in full_name.
         */
        if (!path[1])
                path++;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003
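
/*
 * dlpar_configure_connector() calls the "ibm,configure-connector" RTAS
 * service repeatedly for a DRC index. Each return code above describes what
 * the work area now holds (a sibling node, a child node, a property of the
 * current node, or a step back up to the parent), and the loop below walks
 * those codes to rebuild the device sub-tree for the connector.
 */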

struct device_node *dlpar_configure_connector(__be32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        const char *parent_path = parent->full_name;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;

        do {
                /* Since we release the rtas_data_buf lock between configure
                 * connector calls we want to re-populate the rtas_data_buffer
                 * with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        if (first_dn)
                                parent_path = last_dn->full_name;

                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        parent_path = last_dn->parent->full_name;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}
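
/*
 * dlpar_attach_node() derives the parent from the node's full_name and adds
 * the node to the live device tree; dlpar_detach_node() removes a node after
 * first recursively detaching all of its children.
 */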

int dlpar_attach_node(struct device_node *dn)
{
        int rc;

        dn->parent = pseries_of_derive_parent(dn->full_name);
        if (IS_ERR(dn->parent))
                return PTR_ERR(dn->parent);

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %s\n",
                       dn->full_name);
                return rc;
        }

        of_node_put(dn->parent);
        return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn);        /* Must decrement the refcount */
        return 0;
}

#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1
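
/*
 * Acquiring a DRC: confirm via the DR_ENTITY_SENSE sensor that the connector
 * is currently unusable, mark its allocation state usable, then unisolate it,
 * rolling the allocation state back if unisolation fails. Releasing a DRC is
 * the inverse sequence.
 */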

int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}

int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}
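
/*
 * Convert the DRC identifier in the hotplug error log from its big-endian
 * wire format and dispatch the request to the resource-specific handler,
 * dlpar_memory() or dlpar_cpu().
 */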

static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
        int rc;

        /* pseries error logs are in BE format, convert to cpu type */
        switch (hp_elog->id_type) {
        case PSERIES_HP_ELOG_ID_DRC_COUNT:
                hp_elog->_drc_u.drc_count =
                                be32_to_cpu(hp_elog->_drc_u.drc_count);
                break;
        case PSERIES_HP_ELOG_ID_DRC_INDEX:
                hp_elog->_drc_u.drc_index =
                                be32_to_cpu(hp_elog->_drc_u.drc_index);
        }

        switch (hp_elog->resource) {
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_CPU:
                rc = dlpar_cpu(hp_elog);
                break;
        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
                rc = -EINVAL;
        }

        return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
        struct pseries_hp_work *hp_work =
                        container_of(work, struct pseries_hp_work, work);

        if (hp_work->rc)
                *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
        else
                handle_dlpar_errorlog(hp_work->errlog);

        if (hp_work->hp_completion)
                complete(hp_work->hp_completion);

        kfree(hp_work->errlog);
        kfree((void *)work);
}
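
/*
 * Queue a hotplug request for pseries_hp_work_fn(). The error log is copied
 * so the work item owns its own buffer; if the work item cannot be allocated
 * the completion is signalled immediately so callers do not block forever.
 */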

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
                         struct completion *hotplug_done, int *rc)
{
        struct pseries_hp_work *work;
        struct pseries_hp_errorlog *hp_errlog_copy;

        hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
                                 GFP_KERNEL);
        memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));

        work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
        if (work) {
                INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
                work->errlog = hp_errlog_copy;
                work->hp_completion = hotplug_done;
                work->rc = rc;
                queue_work(pseries_hp_wq, (struct work_struct *)work);
        } else {
                *rc = -ENOMEM;
                kfree(hp_errlog_copy);
                complete(hotplug_done);
        }
}
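
/*
 * The dlpar_parse_*() helpers below each consume one whitespace-separated
 * token from the command written to sysfs and fill in the corresponding
 * field of the pseries_hp_errorlog passed to queue_hotplug_event().
 */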

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "memory")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
        } else if (sysfs_streq(arg, "cpu")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
        } else {
                pr_err("Invalid resource specified.\n");
                return -EINVAL;
        }

        return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "add")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
        } else if (sysfs_streq(arg, "remove")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
        } else {
                pr_err("Invalid action specified.\n");
                return -EINVAL;
        }

        return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        u32 index, count;
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "index")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_count = cpu_to_be32(count);
        } else {
                pr_err("Invalid id_type specified.\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
                           const char *buf, size_t count)
{
        struct pseries_hp_errorlog *hp_elog;
        struct completion hotplug_done;
        char *argbuf;
        char *args;
        int rc;

        args = argbuf = kstrdup(buf, GFP_KERNEL);
        hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
        if (!hp_elog || !argbuf) {
                pr_info("Could not allocate resources for DLPAR operation\n");
                kfree(argbuf);
                kfree(hp_elog);
                return -ENOMEM;
        }

        /*
         * Parse out the request from the user, this will be in the form:
         * <resource> <action> <id_type> <id>
         */
        rc = dlpar_parse_resource(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_action(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_id_type(&args, hp_elog);
        if (rc)
                goto dlpar_store_out;

        init_completion(&hotplug_done);
        queue_hotplug_event(hp_elog, &hotplug_done, &rc);
        wait_for_completion(&hotplug_done);

dlpar_store_out:
        kfree(argbuf);
        kfree(hp_elog);

        if (rc)
                pr_err("Could not handle DLPAR request \"%s\"\n", buf);

        return rc ? rc : count;
}

static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
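
/*
 * Set up the workqueue used by queue_hotplug_event() and expose the
 * write-only "dlpar" attribute under the kernel kobject so user space can
 * submit hotplug requests.
 */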

static int __init pseries_dlpar_init(void)
{
        pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
                                        WQ_UNBOUND, 1);
        return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);