// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */

#define pr_fmt(fmt) "mobility: " fmt
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "../../kernel/cacheinfo.h"
static struct kobject *mobility_kobj;
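/*
 * Header of the work area returned by ibm,update-properties; the
 * property data parsed below follows this header in the RTAS buffer.
 */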
struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;
#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
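/*
 * All RTAS communication here goes through the single global
 * rtas_data_buf, which is guarded by rtas_data_buf_lock; the caller's
 * buffer is copied in and out around the call while the lock is held.
 */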
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}
static int delete_dt_node(struct device_node *dn)
{
	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}
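/*
 * Apply one value descriptor from ibm,update-properties to @dn.
 * *@prop accumulates a property whose value spans multiple work
 * areas; it remains non-NULL until the final chunk arrives, at which
 * point the completed property is installed and *@prop is reset.
 */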
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/* A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}
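/*
 * Refresh the properties of @dn by iterating ibm,update-properties
 * until firmware stops returning 1 ("more data available").
 */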
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_token("ibm,update-properties");
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					     scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* On the first call to ibm,update-properties for a node the
		 * first property value descriptor contains an empty
		 * property name, the property value length encoded as u32,
		 * and the property value is the node path being updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
			}
		}
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}
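/*
 * Create and attach a node newly added under @parent_dn, using the
 * configure-connector sequence identified by @drc_index.
 */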
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}
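/*
 * Walk the ibm,update-nodes work area: after a four-cell header, each
 * entry encodes an action in its top byte and a node count in the low
 * three bytes, followed by that many phandles (an added node carries
 * an additional DRC index).
 */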
int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_token("ibm,update-nodes");
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
			}
		}
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}
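/*
 * Fixups required after resuming on the destination system: activate
 * the new firmware level, refresh the device tree, and re-derive any
 * state the kernel caches from it.
 */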
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes. Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();
}
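/*
 * Translate the H_VASI_STATE hcall status for @handle into an errno,
 * storing the reported stream state in *@res on success.
 */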
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}
static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}
static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}
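/* Shrink the SLB to the architected minimum, returning the previous size. */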
static u16 clamp_slb_size(void)
{
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
}
static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}
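/*
 * Executed on every online CPU by stop_machine(). Each CPU parks in
 * H_JOIN except the last one to join, which gets H_CONTINUE and
 * carries out the suspend on behalf of the partition.
 */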
static int do_join(void *arg)
{
	atomic_t *counter = arg;
	long hvrc;
	int ret;

	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this cpu has received a
		 * prod.
		 */
		ret = 0;
		break;
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so
	 * reset the watchdog.
	 */
	touch_nmi_watchdog();
	return ret;
}
/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR        = 1,
	VSP_SOURCE          = 2,
	PARTITION_FIRMWARE  = 3,
	PLATFORM_FIRMWARE   = 4,
	VSP_TARGET          = 5,
	MIGRATING_PARTITION = 6,
};
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc != H_SUCCESS)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}
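/*
 * Try the suspend up to max_attempts times, multiplying the delay by
 * ten after each failed attempt (1 ms, 10 ms, ...) as long as the
 * VASI stream still reports Suspending.
 */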
static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		atomic_t counter = ATOMIC_INIT(0);
		unsigned long vasi_state;
		int vasi_err;

		ret = stop_machine(do_join, &counter, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * crude retry loop.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}
static int pseries_migrate_partition(u64 handle)
{
	int ret;

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		return ret;

	ret = pseries_suspend(handle);
	if (ret == 0)
		post_mobility_fixup();
	else
		pseries_cancel_migration(handle, ret);

	return ret;
}
int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}
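/*
 * Write handler for /sys/kernel/mobility/migration. Userspace (drmgr)
 * writes the stream id of a migration that platform firmware has
 * prepared, e.g. "echo 0x12345678 > /sys/kernel/mobility/migration"
 * (stream id illustrative); the write blocks until the migration
 * completes or fails.
 */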
static ssize_t migration_store(struct class *class,
			       struct class_attribute *attr, const char *buf,
			       size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}
/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 *	firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1
static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));
static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);