// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;
	return 0;
}
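
/*
 * Note: hv_init() only allocates the per-cpu context array; the SynIC
 * message, event and post-msg pages inside each context are allocated
 * later by hv_synic_alloc() and handed to the hypervisor per CPU by
 * hv_synic_init().
 */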

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);
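
	/*
	 * The message is staged in the preallocated, page-aligned per-cpu
	 * post_msg_page because Hyper-V requires that hypercall input not
	 * cross a page boundary; a caller-supplied buffer gives no such
	 * guarantee.
	 */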

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	/* Preemption must remain disabled until after the hypercall
	 * so some other thread can't get scheduled onto this cpu and
	 * corrupt the per-cpu post_msg_page.
	 */
	put_cpu_ptr(hv_cpu);

	return status & 0xFFFF;
}
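
/*
 * A Hyper-V hypercall returns its HV_STATUS_* code in the low 16 bits of
 * the result, so callers of hv_post_message() compare the return value
 * against HV_STATUS_SUCCESS (0) rather than against a Linux errno.
 */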

int hv_synic_alloc(void)
{
	int cpu;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and cleanup properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}
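
	/*
	 * hv_numa_map holds one cpumask per NUMA node; elsewhere in the
	 * driver, init_vp_index() in channel_mgmt.c uses it to spread
	 * channel interrupt targets across the CPUs of each node.
	 */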

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}

		INIT_LIST_HEAD(&hv_cpu->chan_list);
	}

	return 0;

err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free().
	 */
	return -ENOMEM;
}

void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}
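
/*
 * Note: free_page() on a zero address is a no-op, so the memset() of each
 * per-cpu context in hv_synic_alloc() is what makes this unconditional
 * cleanup safe after a partial allocation failure.
 */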

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e. x2v shim), we need to
 * retrieve the initialized message and event pages.  Otherwise, we create and
 * initialize the message and event pages.
 */
int hv_synic_init(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/* Setup the Synic's message page */
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

	/* Setup the Synic's event page */
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

	/* Setup the shared SINT. */
	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
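
	/*
	 * Honor the hypervisor's auto-EOI recommendation: Hyper-V sets
	 * HV_DEPRECATING_AEOI_RECOMMENDED (typically on hardware with APIC
	 * virtualization) when an explicit EOI is cheaper than the
	 * intercept that auto-EOI would force.
	 */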
	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		shared_sint.auto_eoi = false;
	else
		shared_sint.auto_eoi = true;

	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	hv_set_synic_state(sctrl.as_uint64);

	hv_stimer_init(cpu);

	return 0;
}
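
/*
 * hv_synic_init() and hv_synic_cleanup() run per CPU: vmbus_bus_init()
 * registers them as CPU hotplug callbacks, roughly (a sketch based on
 * vmbus_drv.c in the same tree, not part of this file):
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
 *				hv_synic_init, hv_synic_cleanup);
 *
 * so the SynIC registers are programmed on each CPU as it comes online.
 */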

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
int hv_synic_cleanup(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;
	unsigned long flags;

	hv_get_synic_state(sctrl.as_uint64);
	if (sctrl.enable != 1)
		return -EFAULT;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup. In case we find one and vmbus is still connected we need to
	 * fail, this will effectively prevent CPU offlining. There is no way
	 * we can re-bind channels to different CPUs for now.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		spin_unlock_irqrestore(&channel->lock, flags);
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	hv_stimer_cleanup(cpu);

	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	hv_set_simp(simp.as_uint64);

	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	hv_set_siefp(siefp.as_uint64);

	/* Disable the global synic bit */
	sctrl.enable = 0;
	hv_set_synic_state(sctrl.as_uint64);

	return 0;
}