arch/powerpc/platforms/pseries/dlpar.c
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

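/*
 * Layout of the work area passed to the ibm,configure-connector RTAS call:
 * the DRC index to configure, a reserved word, and offsets into the work
 * area at which the returned node/property name and property data live.
 */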
struct cc_workarea {
	u32	drc_index;
	u32	zero;
	u32	name_offset;
	u32	prop_length;
	u32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + ccwa->name_offset;
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = ccwa->prop_length;
	value = (char *)ccwa + ccwa->prop_offset;
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	/* The configure connector reported name does not contain a
	 * preceding '/', so we allocate a buffer large enough to
	 * prepend this to the full_name.
	 */
	name = (char *)ccwa + ccwa->name_offset;
	dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

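/*
 * dlpar_configure_connector - build a device tree fragment for @drc_index
 *
 * Repeatedly invokes the ibm,configure-connector RTAS method, copying the
 * shared rtas_data_buf in and out around each call since the lock is
 * dropped between calls.  The return code indicates whether the work area
 * now describes a new child node, a sibling node, a property of the
 * current node, or a step back up to the parent, and the tree of
 * device_nodes is assembled accordingly.  Returns the root of the new
 * fragment, or NULL on failure.
 */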
struct device_node *dlpar_configure_connector(u32 drc_index)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buf
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn)
				first_dn = dn;
			else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

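/*
 * derive_parent - look up the parent device_node for @path
 *
 * Strips the last path component from @path and finds the corresponding
 * node, falling back to the device tree root when @path has only a single
 * component.  The caller is responsible for dropping the returned
 * reference.
 */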
static struct device_node *derive_parent(const char *path)
{
	struct device_node *parent;
	char *last_slash;

	last_slash = strrchr(path, '/');
	if (last_slash == path) {
		parent = of_find_node_by_path("/");
	} else {
		char *parent_path;
		int parent_path_len = last_slash - path + 1;

		parent_path = kmalloc(parent_path_len, GFP_KERNEL);
		if (!parent_path)
			return NULL;

		strlcpy(parent_path, path, parent_path_len);
		parent = of_find_node_by_path(parent_path);
		kfree(parent_path);
	}

	return parent;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	of_node_set_flag(dn, OF_DYNAMIC);
	kref_init(&dn->kref);
	dn->parent = derive_parent(dn->full_name);
	if (!dn->parent)
		return -ENOMEM;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	int rc;

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn); /* Must decrement the refcount */
	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

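/*
 * Acquiring a DRC means sensing that the entity is currently unusable,
 * then marking its allocation-state usable and unisolating it.  Releasing
 * a DRC reverses the sequence: isolate first, then mark the
 * allocation-state unusable.  If either second step fails, the first step
 * is undone.
 */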
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

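/*
 * dlpar_online_cpu - bring the hardware threads of @dn online
 *
 * Walks the "ibm,ppc-interrupt-server#s" property, matches each thread to
 * its logical cpu via the hard SMP id, and onlines it with cpu_up().  The
 * cpu maps lock is dropped around the call to cpu_up().
 */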
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != intserv[i])
				continue;
			BUG_ON(get_cpu_current_state(cpu)
			       != CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = cpu_up(cpu);
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", intserv[i]);
	}
	cpu_maps_update_done();

out:
	return rc;
}

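/*
 * dlpar_cpu_probe - "probe" handler for adding a cpu
 *
 * Parses the DRC index from @buf, builds the device tree fragment with
 * configure-connector, acquires the DRC, attaches the node to the device
 * tree, and finally onlines the new cpu's threads.
 */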
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn;
	unsigned long drc_index;
	char *cpu_name;
	int rc;

	cpu_hotplug_driver_lock();
	rc = strict_strtoul(buf, 0, &drc_index);
	if (rc) {
		rc = -EINVAL;
		goto out;
	}

	dn = dlpar_configure_connector(drc_index);
	if (!dn) {
		rc = -EINVAL;
		goto out;
	}

	/* configure-connector reports cpus as living in the base
	 * directory of the device tree.  CPUs actually live in the
	 * cpus directory so we need to fixup the full_name.
	 */
	cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
	if (!cpu_name) {
		dlpar_free_cc_nodes(dn);
		rc = -ENOMEM;
		goto out;
	}

	kfree(dn->full_name);
	dn->full_name = cpu_name;

	rc = dlpar_acquire_drc(drc_index);
	if (rc) {
		dlpar_free_cc_nodes(dn);
		rc = -EINVAL;
		goto out;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		goto out;
	}

	rc = dlpar_online_cpu(dn);
out:
	cpu_hotplug_driver_unlock();

	return rc ? rc : count;
}

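/*
 * dlpar_offline_cpu - take the hardware threads of @dn offline
 *
 * Threads that are still online are offlined with cpu_down().  Threads
 * already parked in CPU_STATE_INACTIVE are woken with H_PROD and allowed
 * to die so that they end up in CPU_STATE_OFFLINE before the cpu is
 * released.
 */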
static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != intserv[i])
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = cpu_down(cpu);
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
			       != H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline "
			       "with physical id 0x%x\n", intserv[i]);
	}
	cpu_maps_update_done();

out:
	return rc;
}

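/*
 * dlpar_cpu_release - "release" handler for removing a cpu
 *
 * Looks up the device node named by @buf, offlines its threads, releases
 * the DRC back to the platform, and detaches the node from the device
 * tree.  If the DRC was released but the node cannot be detached, the DRC
 * is re-acquired.
 */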
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	const u32 *drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
	if (!drc_index) {
		of_node_put(dn);
		return -EINVAL;
	}

	cpu_hotplug_driver_lock();
	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		rc = -EINVAL;
		goto out;
	}

	rc = dlpar_release_drc(*drc_index);
	if (rc) {
		of_node_put(dn);
		goto out;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(*drc_index);
		goto out;
	}

	of_node_put(dn);
out:
	cpu_hotplug_driver_unlock();
	return rc ? rc : count;
}

static int __init pseries_dlpar_init(void)
{
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;

	return 0;
}
machine_device_initcall(pseries, pseries_dlpar_init);

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */