arch/powerpc/platforms/pseries/dlpar.c
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
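
/*
 * Unbound, single-threaded workqueue (allocated in pseries_dlpar_init() with
 * max_active = 1) on which hotplug requests are queued so that they are
 * handled one at a time.
 */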
static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
	struct completion *hp_completion;
	int *rc;
};
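
/*
 * Work area placed at the start of the RTAS data buffer for the
 * ibm,configure-connector call: the caller fills in the DRC index followed by
 * a zero word, and the call returns offsets from the start of the work area
 * to the name and property data, as interpreted by dlpar_parse_cc_node() and
 * dlpar_parse_cc_property() below.
 */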
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If the parent node path is "/", advance path to its NUL terminator
	 * to prevent a double leading slash in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}
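
/* Status values returned by the ibm,configure-connector RTAS call. */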
#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate rtas_data_buf with
		 * the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = pseries_of_derive_parent(dn->full_name);
	if (IS_ERR(dn->parent))
		return PTR_ERR(dn->parent);

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1
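
/*
 * Acquiring a DRC checks that the entity sensor reports it unusable, then
 * marks its allocation state usable and unisolates it; releasing reverses
 * the sequence.  If the second step fails, the first is rolled back.
 */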
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}
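
/*
 * Handle one hotplug error log: convert the big-endian DRC fields to CPU
 * byte order, then hand the request to the memory or CPU DLPAR code.
 */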
static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	if (hp_work->rc)
		*(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
	else
		handle_dlpar_errorlog(hp_work->errlog);

	if (hp_work->hp_completion)
		complete(hp_work->hp_completion);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
			 struct completion *hotplug_done, int *rc)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
				 GFP_KERNEL);
	memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		work->hp_completion = hotplug_done;
		work->rc = rc;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		*rc = -ENOMEM;
		kfree(hp_errlog_copy);
		complete(hotplug_done);
	}
}
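
/*
 * Parsers for the sysfs "dlpar" command string, which takes the form
 * "<resource> <action> <id_type> <id>" (see dlpar_store() below).
 */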
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	struct completion hotplug_done;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog || !argbuf) {
		pr_info("Could not allocate resources for DLPAR operation\n");
		kfree(argbuf);
		kfree(hp_elog);
		return -ENOMEM;
	}

	/*
	 * Parse the request from the user; it will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	init_completion(&hotplug_done);
	queue_hotplug_event(hp_elog, &hotplug_done, &rc);
	wait_for_completion(&hotplug_done);

dlpar_store_out:
	kfree(argbuf);
	kfree(hp_elog);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR(dlpar, S_IWUSR | S_IRUSR, dlpar_show, dlpar_store);

static int __init pseries_dlpar_init(void)
{
	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
					WQ_UNBOUND, 1);
	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);