// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>
10 static void ipi_send(struct kvm
*kvm
, uint64_t data
)
14 struct kvm_vcpu
*vcpu
;
15 struct kvm_interrupt irq
;
17 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
18 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
19 if (unlikely(vcpu
== NULL
)) {
20 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
24 action
= BIT(data
& 0x1f);
25 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
26 status
= vcpu
->arch
.ipi_state
.status
;
27 vcpu
->arch
.ipi_state
.status
|= action
;
28 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
30 irq
.irq
= LARCH_INT_IPI
;
31 kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
35 static void ipi_clear(struct kvm_vcpu
*vcpu
, uint64_t data
)
38 struct kvm_interrupt irq
;
40 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
41 vcpu
->arch
.ipi_state
.status
&= ~data
;
42 status
= vcpu
->arch
.ipi_state
.status
;
43 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
45 irq
.irq
= -LARCH_INT_IPI
;
46 kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
50 static uint64_t read_mailbox(struct kvm_vcpu
*vcpu
, int offset
, int len
)
54 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
55 data
= *(ulong
*)((void *)vcpu
->arch
.ipi_state
.buf
+ (offset
- 0x20));
56 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
64 return data
& 0xffffffff;
68 kvm_err("%s: unknown data len: %d\n", __func__
, len
);
73 static void write_mailbox(struct kvm_vcpu
*vcpu
, int offset
, uint64_t data
, int len
)
77 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
78 pbuf
= (void *)vcpu
->arch
.ipi_state
.buf
+ (offset
- 0x20);
82 *(unsigned char *)pbuf
= (unsigned char)data
;
85 *(unsigned short *)pbuf
= (unsigned short)data
;
88 *(unsigned int *)pbuf
= (unsigned int)data
;
91 *(unsigned long *)pbuf
= (unsigned long)data
;
94 kvm_err("%s: unknown data len: %d\n", __func__
, len
);
96 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
99 static int send_ipi_data(struct kvm_vcpu
*vcpu
, gpa_t addr
, uint64_t data
)
102 uint32_t val
= 0, mask
= 0;
105 * Bit 27-30 is mask for byte writing.
106 * If the mask is 0, we need not to do anything.
108 if ((data
>> 27) & 0xf) {
109 /* Read the old val */
110 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
111 ret
= kvm_io_bus_read(vcpu
, KVM_IOCSR_BUS
, addr
, sizeof(val
), &val
);
112 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
114 kvm_err("%s: : read date from addr %llx failed\n", __func__
, addr
);
117 /* Construct the mask by scanning the bit 27-30 */
118 for (i
= 0; i
< 4; i
++) {
119 if (data
& (BIT(27 + i
)))
120 mask
|= (0xff << (i
* 8));
122 /* Save the old part of val */
125 val
|= ((uint32_t)(data
>> 32) & ~mask
);
126 idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
127 ret
= kvm_io_bus_write(vcpu
, KVM_IOCSR_BUS
, addr
, sizeof(val
), &val
);
128 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
130 kvm_err("%s: : write date to addr %llx failed\n", __func__
, addr
);
135 static int mail_send(struct kvm
*kvm
, uint64_t data
)
137 int cpu
, mailbox
, offset
;
138 struct kvm_vcpu
*vcpu
;
140 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
141 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
142 if (unlikely(vcpu
== NULL
)) {
143 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
146 mailbox
= ((data
& 0xffffffff) >> 2) & 0x7;
147 offset
= IOCSR_IPI_BASE
+ IOCSR_IPI_BUF_20
+ mailbox
* 4;
149 return send_ipi_data(vcpu
, offset
, data
);
152 static int any_send(struct kvm
*kvm
, uint64_t data
)
155 struct kvm_vcpu
*vcpu
;
157 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
158 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
159 if (unlikely(vcpu
== NULL
)) {
160 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
163 offset
= data
& 0xffff;
165 return send_ipi_data(vcpu
, offset
, data
);
168 static int loongarch_ipi_readl(struct kvm_vcpu
*vcpu
, gpa_t addr
, int len
, void *val
)
174 offset
= (uint32_t)(addr
& 0x1ff);
175 WARN_ON_ONCE(offset
& (len
- 1));
178 case IOCSR_IPI_STATUS
:
179 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
180 res
= vcpu
->arch
.ipi_state
.status
;
181 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
184 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
185 res
= vcpu
->arch
.ipi_state
.en
;
186 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
191 case IOCSR_IPI_CLEAR
:
194 case IOCSR_IPI_BUF_20
... IOCSR_IPI_BUF_38
+ 7:
195 if (offset
+ len
> IOCSR_IPI_BUF_38
+ 8) {
196 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
197 __func__
, offset
, len
);
201 res
= read_mailbox(vcpu
, offset
, len
);
204 kvm_err("%s: unknown addr: %llx\n", __func__
, addr
);
208 *(uint64_t *)val
= res
;
213 static int loongarch_ipi_writel(struct kvm_vcpu
*vcpu
, gpa_t addr
, int len
, const void *val
)
219 data
= *(uint64_t *)val
;
221 offset
= (uint32_t)(addr
& 0x1ff);
222 WARN_ON_ONCE(offset
& (len
- 1));
225 case IOCSR_IPI_STATUS
:
229 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
230 vcpu
->arch
.ipi_state
.en
= data
;
231 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
236 case IOCSR_IPI_CLEAR
:
237 /* Just clear the status of the current vcpu */
238 ipi_clear(vcpu
, data
);
240 case IOCSR_IPI_BUF_20
... IOCSR_IPI_BUF_38
+ 7:
241 if (offset
+ len
> IOCSR_IPI_BUF_38
+ 8) {
242 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
243 __func__
, offset
, len
);
247 write_mailbox(vcpu
, offset
, data
, len
);
250 ipi_send(vcpu
->kvm
, data
);
252 case IOCSR_MAIL_SEND
:
253 ret
= mail_send(vcpu
->kvm
, *(uint64_t *)val
);
256 ret
= any_send(vcpu
->kvm
, *(uint64_t *)val
);
259 kvm_err("%s: unknown addr: %llx\n", __func__
, addr
);
267 static int kvm_ipi_read(struct kvm_vcpu
*vcpu
,
268 struct kvm_io_device
*dev
,
269 gpa_t addr
, int len
, void *val
)
272 struct loongarch_ipi
*ipi
;
274 ipi
= vcpu
->kvm
->arch
.ipi
;
276 kvm_err("%s: ipi irqchip not valid!\n", __func__
);
279 ipi
->kvm
->stat
.ipi_read_exits
++;
280 ret
= loongarch_ipi_readl(vcpu
, addr
, len
, val
);
285 static int kvm_ipi_write(struct kvm_vcpu
*vcpu
,
286 struct kvm_io_device
*dev
,
287 gpa_t addr
, int len
, const void *val
)
290 struct loongarch_ipi
*ipi
;
292 ipi
= vcpu
->kvm
->arch
.ipi
;
294 kvm_err("%s: ipi irqchip not valid!\n", __func__
);
297 ipi
->kvm
->stat
.ipi_write_exits
++;
298 ret
= loongarch_ipi_writel(vcpu
, addr
, len
, val
);
303 static const struct kvm_io_device_ops kvm_ipi_ops
= {
304 .read
= kvm_ipi_read
,
305 .write
= kvm_ipi_write
,
308 static int kvm_ipi_regs_access(struct kvm_device
*dev
,
309 struct kvm_device_attr
*attr
,
316 struct kvm_vcpu
*vcpu
;
318 cpu
= (attr
->attr
>> 16) & 0x3ff;
319 addr
= attr
->attr
& 0xff;
321 vcpu
= kvm_get_vcpu(dev
->kvm
, cpu
);
322 if (unlikely(vcpu
== NULL
)) {
323 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
328 case IOCSR_IPI_STATUS
:
329 p
= &vcpu
->arch
.ipi_state
.status
;
332 p
= &vcpu
->arch
.ipi_state
.en
;
335 p
= &vcpu
->arch
.ipi_state
.set
;
337 case IOCSR_IPI_CLEAR
:
338 p
= &vcpu
->arch
.ipi_state
.clear
;
340 case IOCSR_IPI_BUF_20
:
341 p
= &vcpu
->arch
.ipi_state
.buf
[0];
344 case IOCSR_IPI_BUF_28
:
345 p
= &vcpu
->arch
.ipi_state
.buf
[1];
348 case IOCSR_IPI_BUF_30
:
349 p
= &vcpu
->arch
.ipi_state
.buf
[2];
352 case IOCSR_IPI_BUF_38
:
353 p
= &vcpu
->arch
.ipi_state
.buf
[3];
357 kvm_err("%s: unknown ipi register, addr = %d\n", __func__
, addr
);
363 if (get_user(val
, (uint32_t __user
*)attr
->addr
))
365 *(uint32_t *)p
= (uint32_t)val
;
366 } else if (len
== 8) {
367 if (get_user(val
, (uint64_t __user
*)attr
->addr
))
369 *(uint64_t *)p
= val
;
373 val
= *(uint32_t *)p
;
374 return put_user(val
, (uint32_t __user
*)attr
->addr
);
375 } else if (len
== 8) {
376 val
= *(uint64_t *)p
;
377 return put_user(val
, (uint64_t __user
*)attr
->addr
);
384 static int kvm_ipi_get_attr(struct kvm_device
*dev
,
385 struct kvm_device_attr
*attr
)
387 switch (attr
->group
) {
388 case KVM_DEV_LOONGARCH_IPI_GRP_REGS
:
389 return kvm_ipi_regs_access(dev
, attr
, false);
391 kvm_err("%s: unknown group (%d)\n", __func__
, attr
->group
);
396 static int kvm_ipi_set_attr(struct kvm_device
*dev
,
397 struct kvm_device_attr
*attr
)
399 switch (attr
->group
) {
400 case KVM_DEV_LOONGARCH_IPI_GRP_REGS
:
401 return kvm_ipi_regs_access(dev
, attr
, true);
403 kvm_err("%s: unknown group (%d)\n", __func__
, attr
->group
);
408 static int kvm_ipi_create(struct kvm_device
*dev
, u32 type
)
412 struct kvm_io_device
*device
;
413 struct loongarch_ipi
*s
;
416 kvm_err("%s: kvm_device ptr is invalid!\n", __func__
);
422 kvm_err("%s: LoongArch IPI has already been created!\n", __func__
);
426 s
= kzalloc(sizeof(struct loongarch_ipi
), GFP_KERNEL
);
430 spin_lock_init(&s
->lock
);
434 * Initialize IOCSR device
437 kvm_iodevice_init(device
, &kvm_ipi_ops
);
438 mutex_lock(&kvm
->slots_lock
);
439 ret
= kvm_io_bus_register_dev(kvm
, KVM_IOCSR_BUS
, IOCSR_IPI_BASE
, IOCSR_IPI_SIZE
, device
);
440 mutex_unlock(&kvm
->slots_lock
);
442 kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__
, ret
);
454 static void kvm_ipi_destroy(struct kvm_device
*dev
)
457 struct loongarch_ipi
*ipi
;
459 if (!dev
|| !dev
->kvm
|| !dev
->kvm
->arch
.ipi
)
464 kvm_io_bus_unregister_dev(kvm
, KVM_IOCSR_BUS
, &ipi
->device
);
468 static struct kvm_device_ops kvm_ipi_dev_ops
= {
469 .name
= "kvm-loongarch-ipi",
470 .create
= kvm_ipi_create
,
471 .destroy
= kvm_ipi_destroy
,
472 .set_attr
= kvm_ipi_set_attr
,
473 .get_attr
= kvm_ipi_get_attr
,
476 int kvm_loongarch_register_ipi_device(void)
478 return kvm_register_device_ops(&kvm_ipi_dev_ops
, KVM_DEV_TYPE_LOONGARCH_IPI
);