arch/powerpc/platforms/pseries/mobility.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */

#define pr_fmt(fmt) "mobility: " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "../../kernel/cacheinfo.h"
static struct kobject *mobility_kobj;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
#define PRRN_SCOPE -2
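
/*
 * Work-area format, as decoded by pseries_devicetree_update() below:
 * each 32-bit header word packs an action code in its top byte and a
 * node count in the low 24 bits, and is followed by one phandle per
 * node (plus a DRC index for each ADD_DT_NODE entry).
 */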
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}
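
/*
 * Note: rtas_data_buf is a single buffer shared by all RTAS users and
 * kept in memory that 32-bit RTAS firmware can address, which is why
 * arguments are staged through it under rtas_data_buf_lock rather than
 * passing the caller's buffer to rtas_call() directly.
 */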
static int delete_dt_node(struct device_node *dn)
{
	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/* A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}
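
/*
 * The *prop cookie above lets a property value larger than the work
 * area accumulate across successive ibm,update-properties calls: while
 * the value descriptor has its high bit set, each chunk is appended to
 * *prop, and of_update_property() is deferred until the final chunk.
 */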
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_token("ibm,update-properties");
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					     scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* On the first call to ibm,update-properties for a node
		 * the first property value descriptor contains an empty
		 * property name, the property value length encoded as u32,
		 * and the property value is the node path being updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
				break;
			}

			cond_resched();
		}

		cond_resched();
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}
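
/*
 * Note that per-property update failures above are logged but not
 * propagated: update_dt_node() returns 0 once the ibm,update-properties
 * call sequence for the node completes.
 */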
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}
int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_token("ibm,update-nodes");
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
				cond_resched();
			}
		}

		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}
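
/*
 * A status of 1 from ibm,update-nodes (and from ibm,update-properties
 * above) indicates more data remains, per PAPR, so the call is repeated
 * until the work area has been fully drained.
 */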
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes. Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();

	return;
}
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}
static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}
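
/*
 * Note there is no timeout in the loop above: a partition can sit in
 * H_VASI_ENABLED indefinitely before the migration actually starts, so
 * the state is polled once per second until it changes or the hcall
 * reports an error.
 */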
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}
static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}
static u16 clamp_slb_size(void)
{
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
}
static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}
static int do_join(void *arg)
{
	atomic_t *counter = arg;
	long hvrc;
	int ret;

	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this cpu has received a
		 * prod.
		 */
		ret = 0;
		break;
	case H_BAD_MODE:
	case H_HARDWARE:
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so
	 * reset the watchdog.
	 */
	touch_nmi_watchdog();
	return ret;
}
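
/*
 * do_join() runs on every online CPU via stop_machine(); the hypervisor
 * parks all callers in H_JOIN except one, which sees H_CONTINUE and
 * performs the suspend. The shared counter ensures that only the first
 * CPU to return issues the H_PROD wakeups for the rest.
 */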
/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR        = 1,
	VSP_SOURCE          = 2,
	PARTITION_FIRMWARE  = 3,
	PLATFORM_FIRMWARE   = 4,
	VSP_TARGET          = 5,
	MIGRATING_PARTITION = 6,
};
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}
static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		atomic_t counter = ATOMIC_INIT(0);
		unsigned long vasi_state;
		int vasi_err;

		ret = stop_machine(do_join, &counter, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * mitigation.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d\n", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}
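
/*
 * With max_attempts == 5 and a 10x backoff starting at 1 ms, a
 * persistently failing suspend retries after delays of 1, 10, 100 and
 * 1000 ms, i.e. roughly 1.1 seconds of sleeping in the worst case.
 */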
static int pseries_migrate_partition(u64 handle)
{
	int ret;

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		return ret;

	ret = pseries_suspend(handle);
	if (ret == 0)
		post_mobility_fixup();
	else
		pseries_cancel_migration(handle, ret);

	return ret;
}
int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}
static ssize_t migration_store(struct class *class,
			       struct class_attribute *attr, const char *buf,
			       size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}
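
/*
 * Example (with a hypothetical stream id): user space initiates a
 * migration by writing the VASI stream id to the file created below,
 * e.g. echo 0x10000001 > /sys/kernel/mobility/migration
 */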
/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 *	firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1

static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));
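
/*
 * CLASS_ATTR_WO() and CLASS_ATTR_STRING() expand to class_attr_migration
 * and class_attr_api_version; only their embedded struct attribute is
 * used below, since the files hang off a bare kobject rather than a
 * struct class (hence .attr for one and .attr.attr for the other).
 */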
static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);