// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/kvm_ipi.h>
#include <asm/kvm_vcpu.h>
10 static void ipi_send(struct kvm
*kvm
, uint64_t data
)
14 struct kvm_vcpu
*vcpu
;
15 struct kvm_interrupt irq
;
17 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
18 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
19 if (unlikely(vcpu
== NULL
)) {
20 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
24 action
= BIT(data
& 0x1f);
25 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
26 status
= vcpu
->arch
.ipi_state
.status
;
27 vcpu
->arch
.ipi_state
.status
|= action
;
28 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
30 irq
.irq
= LARCH_INT_IPI
;
31 kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
35 static void ipi_clear(struct kvm_vcpu
*vcpu
, uint64_t data
)
38 struct kvm_interrupt irq
;
40 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
41 vcpu
->arch
.ipi_state
.status
&= ~data
;
42 status
= vcpu
->arch
.ipi_state
.status
;
43 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
45 irq
.irq
= -LARCH_INT_IPI
;
46 kvm_vcpu_ioctl_interrupt(vcpu
, &irq
);
50 static uint64_t read_mailbox(struct kvm_vcpu
*vcpu
, int offset
, int len
)
54 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
55 data
= *(ulong
*)((void *)vcpu
->arch
.ipi_state
.buf
+ (offset
- 0x20));
56 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
64 return data
& 0xffffffff;
68 kvm_err("%s: unknown data len: %d\n", __func__
, len
);
73 static void write_mailbox(struct kvm_vcpu
*vcpu
, int offset
, uint64_t data
, int len
)
77 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
78 pbuf
= (void *)vcpu
->arch
.ipi_state
.buf
+ (offset
- 0x20);
82 *(unsigned char *)pbuf
= (unsigned char)data
;
85 *(unsigned short *)pbuf
= (unsigned short)data
;
88 *(unsigned int *)pbuf
= (unsigned int)data
;
91 *(unsigned long *)pbuf
= (unsigned long)data
;
94 kvm_err("%s: unknown data len: %d\n", __func__
, len
);
96 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
99 static int send_ipi_data(struct kvm_vcpu
*vcpu
, gpa_t addr
, uint64_t data
)
102 uint32_t val
= 0, mask
= 0;
105 * Bit 27-30 is mask for byte writing.
106 * If the mask is 0, we need not to do anything.
108 if ((data
>> 27) & 0xf) {
109 /* Read the old val */
110 ret
= kvm_io_bus_read(vcpu
, KVM_IOCSR_BUS
, addr
, sizeof(val
), &val
);
112 kvm_err("%s: : read date from addr %llx failed\n", __func__
, addr
);
115 /* Construct the mask by scanning the bit 27-30 */
116 for (i
= 0; i
< 4; i
++) {
117 if (data
& (BIT(27 + i
)))
118 mask
|= (0xff << (i
* 8));
120 /* Save the old part of val */
123 val
|= ((uint32_t)(data
>> 32) & ~mask
);
124 ret
= kvm_io_bus_write(vcpu
, KVM_IOCSR_BUS
, addr
, sizeof(val
), &val
);
126 kvm_err("%s: : write date to addr %llx failed\n", __func__
, addr
);
131 static int mail_send(struct kvm
*kvm
, uint64_t data
)
133 int cpu
, mailbox
, offset
;
134 struct kvm_vcpu
*vcpu
;
136 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
137 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
138 if (unlikely(vcpu
== NULL
)) {
139 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
142 mailbox
= ((data
& 0xffffffff) >> 2) & 0x7;
143 offset
= IOCSR_IPI_BASE
+ IOCSR_IPI_BUF_20
+ mailbox
* 4;
145 return send_ipi_data(vcpu
, offset
, data
);
148 static int any_send(struct kvm
*kvm
, uint64_t data
)
151 struct kvm_vcpu
*vcpu
;
153 cpu
= ((data
& 0xffffffff) >> 16) & 0x3ff;
154 vcpu
= kvm_get_vcpu_by_cpuid(kvm
, cpu
);
155 if (unlikely(vcpu
== NULL
)) {
156 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
159 offset
= data
& 0xffff;
161 return send_ipi_data(vcpu
, offset
, data
);
164 static int loongarch_ipi_readl(struct kvm_vcpu
*vcpu
, gpa_t addr
, int len
, void *val
)
170 offset
= (uint32_t)(addr
& 0x1ff);
171 WARN_ON_ONCE(offset
& (len
- 1));
174 case IOCSR_IPI_STATUS
:
175 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
176 res
= vcpu
->arch
.ipi_state
.status
;
177 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
180 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
181 res
= vcpu
->arch
.ipi_state
.en
;
182 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
187 case IOCSR_IPI_CLEAR
:
190 case IOCSR_IPI_BUF_20
... IOCSR_IPI_BUF_38
+ 7:
191 if (offset
+ len
> IOCSR_IPI_BUF_38
+ 8) {
192 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
193 __func__
, offset
, len
);
197 res
= read_mailbox(vcpu
, offset
, len
);
200 kvm_err("%s: unknown addr: %llx\n", __func__
, addr
);
204 *(uint64_t *)val
= res
;
209 static int loongarch_ipi_writel(struct kvm_vcpu
*vcpu
, gpa_t addr
, int len
, const void *val
)
215 data
= *(uint64_t *)val
;
217 offset
= (uint32_t)(addr
& 0x1ff);
218 WARN_ON_ONCE(offset
& (len
- 1));
221 case IOCSR_IPI_STATUS
:
225 spin_lock(&vcpu
->arch
.ipi_state
.lock
);
226 vcpu
->arch
.ipi_state
.en
= data
;
227 spin_unlock(&vcpu
->arch
.ipi_state
.lock
);
232 case IOCSR_IPI_CLEAR
:
233 /* Just clear the status of the current vcpu */
234 ipi_clear(vcpu
, data
);
236 case IOCSR_IPI_BUF_20
... IOCSR_IPI_BUF_38
+ 7:
237 if (offset
+ len
> IOCSR_IPI_BUF_38
+ 8) {
238 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n",
239 __func__
, offset
, len
);
243 write_mailbox(vcpu
, offset
, data
, len
);
246 ipi_send(vcpu
->kvm
, data
);
248 case IOCSR_MAIL_SEND
:
249 ret
= mail_send(vcpu
->kvm
, *(uint64_t *)val
);
252 ret
= any_send(vcpu
->kvm
, *(uint64_t *)val
);
255 kvm_err("%s: unknown addr: %llx\n", __func__
, addr
);
263 static int kvm_ipi_read(struct kvm_vcpu
*vcpu
,
264 struct kvm_io_device
*dev
,
265 gpa_t addr
, int len
, void *val
)
268 struct loongarch_ipi
*ipi
;
270 ipi
= vcpu
->kvm
->arch
.ipi
;
272 kvm_err("%s: ipi irqchip not valid!\n", __func__
);
275 ipi
->kvm
->stat
.ipi_read_exits
++;
276 ret
= loongarch_ipi_readl(vcpu
, addr
, len
, val
);
281 static int kvm_ipi_write(struct kvm_vcpu
*vcpu
,
282 struct kvm_io_device
*dev
,
283 gpa_t addr
, int len
, const void *val
)
286 struct loongarch_ipi
*ipi
;
288 ipi
= vcpu
->kvm
->arch
.ipi
;
290 kvm_err("%s: ipi irqchip not valid!\n", __func__
);
293 ipi
->kvm
->stat
.ipi_write_exits
++;
294 ret
= loongarch_ipi_writel(vcpu
, addr
, len
, val
);
299 static const struct kvm_io_device_ops kvm_ipi_ops
= {
300 .read
= kvm_ipi_read
,
301 .write
= kvm_ipi_write
,
304 static int kvm_ipi_regs_access(struct kvm_device
*dev
,
305 struct kvm_device_attr
*attr
,
312 struct kvm_vcpu
*vcpu
;
314 cpu
= (attr
->attr
>> 16) & 0x3ff;
315 addr
= attr
->attr
& 0xff;
317 vcpu
= kvm_get_vcpu(dev
->kvm
, cpu
);
318 if (unlikely(vcpu
== NULL
)) {
319 kvm_err("%s: invalid target cpu: %d\n", __func__
, cpu
);
324 case IOCSR_IPI_STATUS
:
325 p
= &vcpu
->arch
.ipi_state
.status
;
328 p
= &vcpu
->arch
.ipi_state
.en
;
331 p
= &vcpu
->arch
.ipi_state
.set
;
333 case IOCSR_IPI_CLEAR
:
334 p
= &vcpu
->arch
.ipi_state
.clear
;
336 case IOCSR_IPI_BUF_20
:
337 p
= &vcpu
->arch
.ipi_state
.buf
[0];
340 case IOCSR_IPI_BUF_28
:
341 p
= &vcpu
->arch
.ipi_state
.buf
[1];
344 case IOCSR_IPI_BUF_30
:
345 p
= &vcpu
->arch
.ipi_state
.buf
[2];
348 case IOCSR_IPI_BUF_38
:
349 p
= &vcpu
->arch
.ipi_state
.buf
[3];
353 kvm_err("%s: unknown ipi register, addr = %d\n", __func__
, addr
);
359 if (get_user(val
, (uint32_t __user
*)attr
->addr
))
361 *(uint32_t *)p
= (uint32_t)val
;
362 } else if (len
== 8) {
363 if (get_user(val
, (uint64_t __user
*)attr
->addr
))
365 *(uint64_t *)p
= val
;
369 val
= *(uint32_t *)p
;
370 return put_user(val
, (uint32_t __user
*)attr
->addr
);
371 } else if (len
== 8) {
372 val
= *(uint64_t *)p
;
373 return put_user(val
, (uint64_t __user
*)attr
->addr
);
380 static int kvm_ipi_get_attr(struct kvm_device
*dev
,
381 struct kvm_device_attr
*attr
)
383 switch (attr
->group
) {
384 case KVM_DEV_LOONGARCH_IPI_GRP_REGS
:
385 return kvm_ipi_regs_access(dev
, attr
, false);
387 kvm_err("%s: unknown group (%d)\n", __func__
, attr
->group
);
392 static int kvm_ipi_set_attr(struct kvm_device
*dev
,
393 struct kvm_device_attr
*attr
)
395 switch (attr
->group
) {
396 case KVM_DEV_LOONGARCH_IPI_GRP_REGS
:
397 return kvm_ipi_regs_access(dev
, attr
, true);
399 kvm_err("%s: unknown group (%d)\n", __func__
, attr
->group
);
404 static int kvm_ipi_create(struct kvm_device
*dev
, u32 type
)
408 struct kvm_io_device
*device
;
409 struct loongarch_ipi
*s
;
412 kvm_err("%s: kvm_device ptr is invalid!\n", __func__
);
418 kvm_err("%s: LoongArch IPI has already been created!\n", __func__
);
422 s
= kzalloc(sizeof(struct loongarch_ipi
), GFP_KERNEL
);
426 spin_lock_init(&s
->lock
);
430 * Initialize IOCSR device
433 kvm_iodevice_init(device
, &kvm_ipi_ops
);
434 mutex_lock(&kvm
->slots_lock
);
435 ret
= kvm_io_bus_register_dev(kvm
, KVM_IOCSR_BUS
, IOCSR_IPI_BASE
, IOCSR_IPI_SIZE
, device
);
436 mutex_unlock(&kvm
->slots_lock
);
438 kvm_err("%s: Initialize IOCSR dev failed, ret = %d\n", __func__
, ret
);
450 static void kvm_ipi_destroy(struct kvm_device
*dev
)
453 struct loongarch_ipi
*ipi
;
455 if (!dev
|| !dev
->kvm
|| !dev
->kvm
->arch
.ipi
)
460 kvm_io_bus_unregister_dev(kvm
, KVM_IOCSR_BUS
, &ipi
->device
);
464 static struct kvm_device_ops kvm_ipi_dev_ops
= {
465 .name
= "kvm-loongarch-ipi",
466 .create
= kvm_ipi_create
,
467 .destroy
= kvm_ipi_destroy
,
468 .set_attr
= kvm_ipi_set_attr
,
469 .get_attr
= kvm_ipi_get_attr
,
472 int kvm_loongarch_register_ipi_device(void)
474 return kvm_register_device_ops(&kvm_ipi_dev_ops
, KVM_DEV_TYPE_LOONGARCH_IPI
);