// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2024 Loongson Technology Corporation Limited
 */

#include <asm/kvm_eiointc.h>
#include <asm/kvm_vcpu.h>
#include <linux/count_zeros.h>

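/*
 * Rebuild the per-cpu, per-ip sw_coreisr bitmaps from the architectural
 * coreisr registers, using ipmap and coremap to locate the parent
 * interrupt pin and the target cpu of every irq line.
 */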
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
	int ipnum, cpu, irq_index, irq_mask, irq;

	for (irq = 0; irq < EIOINTC_IRQS; irq++) {
		ipnum = s->ipmap.reg_u8[irq / 32];
		if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
			ipnum = count_trailing_zeros(ipnum);
			ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
		}
		irq_index = irq / 32;
		irq_mask = BIT(irq & 0x1f);

		cpu = s->coremap.reg_u8[irq];
		if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
			set_bit(irq, s->sw_coreisr[cpu][ipnum]);
		else
			clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
	}
}

static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
	int ipnum, cpu, found, irq_index, irq_mask;
	struct kvm_vcpu *vcpu;
	struct kvm_interrupt vcpu_irq;

	ipnum = s->ipmap.reg_u8[irq / 32];
	if (!(s->status & BIT(EIOINTC_ENABLE_INT_ENCODE))) {
		ipnum = count_trailing_zeros(ipnum);
		ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
	}

	cpu = s->sw_coremap[irq];
	vcpu = kvm_get_vcpu(s->kvm, cpu);
	irq_index = irq / 32;
	irq_mask = BIT(irq & 0x1f);

	if (level) {
		/* do nothing if the irq is not enabled */
		if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
			return;
		s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
		set_bit(irq, s->sw_coreisr[cpu][ipnum]);
	} else {
		s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
		clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
		found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
	}

	if (found < EIOINTC_IRQS)
		return; /* another irq is still pending on this ip, no need to update the parent irq */

	vcpu_irq.irq = level ? (INT_HWI0 + ipnum) : -(INT_HWI0 + ipnum);
	kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq);
}

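/*
 * Refresh the sw_coremap routing cache from a coremap register write. Each
 * byte routes one irq line; when @notify is true and that line is currently
 * pending in isr, the irq is lowered on the old cpu and raised again on the
 * new one so the routing change takes effect immediately.
 */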
static inline void eiointc_update_sw_coremap(struct loongarch_eiointc *s,
					int irq, void *pvalue, u32 len, bool notify)
{
	int i, cpu;
	u64 val = 0;

	/* copy only the bytes the caller actually provides */
	memcpy(&val, pvalue, len);

	for (i = 0; i < len; i++) {
		cpu = val & 0xff;
		val = val >> 8;

		if (!(s->status & BIT(EIOINTC_ENABLE_CPU_ENCODE))) {
			cpu = ffs(cpu) - 1;
			cpu = (cpu >= 4) ? 0 : cpu;
		}

		if (s->sw_coremap[irq + i] == cpu)
			continue;

		if (notify && test_bit(irq + i, (unsigned long *)s->isr.reg_u8)) {
			/* lower irq at old cpu and raise irq at new cpu */
			eiointc_update_irq(s, irq + i, 0);
			s->sw_coremap[irq + i] = cpu;
			eiointc_update_irq(s, irq + i, 1);
		} else {
			s->sw_coremap[irq + i] = cpu;
		}
	}
}

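/* Latch the line level in isr, then update interrupt routing under the eiointc lock */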
void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
{
	unsigned long flags;
	unsigned long *isr = (unsigned long *)s->isr.reg_u8;

	level ? set_bit(irq, isr) : clear_bit(irq, isr);
	spin_lock_irqsave(&s->lock, flags);
	eiointc_update_irq(s, irq, level);
	spin_unlock_irqrestore(&s->lock, flags);
}

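/*
 * Replay interrupts for one byte of the enable register: for every bit set
 * in @mask that is also pending in isr, rerun eiointc_update_irq() with the
 * requested @level. @index is a byte index into the register file.
 */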
static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
		struct loongarch_eiointc *s, int index, u8 mask, int level)
{
	u8 val;
	int irq;

	val = mask & s->isr.reg_u8[index];
	irq = ffs(val);
	while (irq != 0) {
		/*
		 * enable bit change from 0 to 1,
		 * need to update irq by pending bits
		 */
		eiointc_update_irq(s, irq - 1 + index * 8, level);
		val &= ~BIT(irq - 1);
		irq = ffs(val);
	}
}

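/*
 * The byte/halfword/word/doubleword read handlers below decode the IOCSR
 * offset relative to EIOINTC_BASE and return the matching slice of the
 * emulated register file; coreisr reads are per requesting vcpu.
 */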
static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int index, ret = 0;
	u8 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = offset - EIOINTC_NODETYPE_START;
		data = s->nodetype.reg_u8[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		index = offset - EIOINTC_IPMAP_START;
		data = s->ipmap.reg_u8[index];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = offset - EIOINTC_ENABLE_START;
		data = s->enable.reg_u8[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = offset - EIOINTC_BOUNCE_START;
		data = s->bounce.reg_u8[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = offset - EIOINTC_COREISR_START;
		data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = offset - EIOINTC_COREMAP_START;
		data = s->coremap.reg_u8[index];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u8 *)val = data;

	return ret;
}

static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int index, ret = 0;
	u16 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 1;
		data = s->nodetype.reg_u16[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		index = (offset - EIOINTC_IPMAP_START) >> 1;
		data = s->ipmap.reg_u16[index];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 1;
		data = s->enable.reg_u16[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = (offset - EIOINTC_BOUNCE_START) >> 1;
		data = s->bounce.reg_u16[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 1;
		data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 1;
		data = s->coremap.reg_u16[index];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u16 *)val = data;

	return ret;
}

static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int index, ret = 0;
	u32 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 2;
		data = s->nodetype.reg_u32[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		index = (offset - EIOINTC_IPMAP_START) >> 2;
		data = s->ipmap.reg_u32[index];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 2;
		data = s->enable.reg_u32[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = (offset - EIOINTC_BOUNCE_START) >> 2;
		data = s->bounce.reg_u32[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 2;
		data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 2;
		data = s->coremap.reg_u32[index];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u32 *)val = data;

	return ret;
}

static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
				gpa_t addr, int len, void *val)
{
	int index, ret = 0;
	u64 data = 0;
	gpa_t offset;

	offset = addr - EIOINTC_BASE;
	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		data = s->nodetype.reg_u64[index];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		index = (offset - EIOINTC_IPMAP_START) >> 3;
		data = s->ipmap.reg_u64;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		data = s->enable.reg_u64[index];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		data = s->bounce.reg_u64[index];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		data = s->coreisr.reg_u64[vcpu->vcpu_id][index];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		index = (offset - EIOINTC_COREMAP_START) >> 3;
		data = s->coremap.reg_u64[index];
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*(u64 *)val = data;

	return ret;
}

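/* IOCSR read entry: dispatch on access size under the eiointc spinlock */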
static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	vcpu->kvm->stat.eiointc_read_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	switch (len) {
	case 1:
		ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
		break;
	case 2:
		ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
		break;
	case 4:
		ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
		break;
	case 8:
		ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
				__func__, addr, len);
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}

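/*
 * The write handlers below mirror the read handlers. Enable register writes
 * replay pending interrupts for bits that change state, coreisr writes are
 * write-1-to-clear, and coremap writes also refresh the sw_coremap cache.
 */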
static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int index, irq, bits, ret = 0;
	u8 cpu;
	u8 data, old_data;
	u8 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u8 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START);
		s->nodetype.reg_u8[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START);
		s->ipmap.reg_u8[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START);
		old_data = s->enable.reg_u8[index];
		s->enable.reg_u8[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 1);
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
		eiointc_enable_irq(vcpu, s, index, data, 0);
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = offset - EIOINTC_BOUNCE_START;
		s->bounce.reg_u8[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START);
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u8[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq;
		s->coremap.reg_u8[index] = data;
		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u16 data, old_data;
	u16 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u16 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 1;
		s->nodetype.reg_u16[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 1;
		s->ipmap.reg_u16[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 1;
		old_data = s->enable.reg_u16[index];
		s->enable.reg_u16[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 1;
		s->bounce.reg_u16[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 1;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u16[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 1;
		s->coremap.reg_u16[index] = data;
		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u32 data, old_data;
	u32 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u32 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 2;
		s->nodetype.reg_u32[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 2;
		s->ipmap.reg_u32[index] = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 2;
		old_data = s->enable.reg_u32[index];
		s->enable.reg_u32[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 2;
		s->bounce.reg_u32[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 2;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u32[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 2;
		s->coremap.reg_u32[index] = data;
		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
				struct loongarch_eiointc *s,
				gpa_t addr, int len, const void *val)
{
	int i, index, irq, bits, ret = 0;
	u8 cpu;
	u64 data, old_data;
	u64 coreisr, old_coreisr;
	gpa_t offset;

	data = *(u64 *)val;
	offset = addr - EIOINTC_BASE;

	switch (offset) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		index = (offset - EIOINTC_NODETYPE_START) >> 3;
		s->nodetype.reg_u64[index] = data;
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		/*
		 * ipmap cannot be set at runtime, can be set only at the beginning
		 * of irqchip driver, need not update upper irq level
		 */
		index = (offset - EIOINTC_IPMAP_START) >> 3;
		s->ipmap.reg_u64 = data;
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		index = (offset - EIOINTC_ENABLE_START) >> 3;
		old_data = s->enable.reg_u64[index];
		s->enable.reg_u64[index] = data;
		/*
		 * 1: enable irq.
		 * update irq when isr is set.
		 */
		data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1);
		}
		/*
		 * 0: disable irq.
		 * update irq when isr is set.
		 */
		data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
		for (i = 0; i < sizeof(data); i++) {
			u8 mask = (data >> (i * 8)) & 0xff;
			eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0);
		}
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		/* do not emulate hw bounced irq routing */
		index = (offset - EIOINTC_BOUNCE_START) >> 3;
		s->bounce.reg_u64[index] = data;
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		index = (offset - EIOINTC_COREISR_START) >> 3;
		/* use attrs to get current cpu index */
		cpu = vcpu->vcpu_id;
		coreisr = data;
		old_coreisr = s->coreisr.reg_u64[cpu][index];
		/* write 1 to clear interrupt */
		s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
		coreisr &= old_coreisr;
		bits = sizeof(data) * 8;
		irq = find_first_bit((void *)&coreisr, bits);
		while (irq < bits) {
			eiointc_update_irq(s, irq + index * bits, 0);
			bitmap_clear((void *)&coreisr, irq, 1);
			irq = find_first_bit((void *)&coreisr, bits);
		}
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		irq = offset - EIOINTC_COREMAP_START;
		index = irq >> 3;
		s->coremap.reg_u64[index] = data;
		eiointc_update_sw_coremap(s, irq, (void *)&data, sizeof(data), true);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

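/* IOCSR write entry: dispatch on access size under the eiointc spinlock */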
static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	int ret = -EINVAL;
	unsigned long flags;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	vcpu->kvm->stat.eiointc_write_exits++;
	spin_lock_irqsave(&eiointc->lock, flags);
	switch (len) {
	case 1:
		ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
		break;
	case 2:
		ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
		break;
	case 4:
		ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
		break;
	case 8:
		ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
				__func__, addr, len);
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}

static const struct kvm_io_device_ops kvm_eiointc_ops = {
	.read	= kvm_eiointc_read,
	.write	= kvm_eiointc_write,
};

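/*
 * Virt-extension register window: EIOINTC_VIRT_FEATURES is read-only and
 * EIOINTC_VIRT_CONFIG can only be changed while the extension is disabled.
 */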
static int kvm_eiointc_virt_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, void *val)
{
	int ret = 0;
	unsigned long flags;
	u32 *data = val;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	switch (addr) {
	case EIOINTC_VIRT_FEATURES:
		*data = eiointc->features;
		break;
	case EIOINTC_VIRT_CONFIG:
		*data = eiointc->status;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}

static int kvm_eiointc_virt_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int len, const void *val)
{
	int ret = 0;
	unsigned long flags;
	u32 value = *(u32 *)val;
	struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;

	if (!eiointc) {
		kvm_err("%s: eiointc irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	addr -= EIOINTC_VIRT_BASE;
	spin_lock_irqsave(&eiointc->lock, flags);
	switch (addr) {
	case EIOINTC_VIRT_FEATURES:
		ret = -EPERM;
		break;
	case EIOINTC_VIRT_CONFIG:
		/*
		 * eiointc features can only be set at disabled status
		 */
		if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) {
			ret = -EPERM;
			break;
		}
		eiointc->status = value & eiointc->features;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&eiointc->lock, flags);

	return ret;
}

static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
	.read	= kvm_eiointc_virt_read,
	.write	= kvm_eiointc_virt_write,
};

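/*
 * KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL handler: userspace sets the vcpu count
 * and feature bits during setup, and LOAD_FINISHED rebuilds the sw_coreisr
 * and sw_coremap caches after register state has been restored.
 */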
static int kvm_eiointc_ctrl_access(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int ret = 0;
	unsigned long flags;
	unsigned long type = (unsigned long)attr->attr;
	u32 i, start_irq;
	void __user *data;
	struct loongarch_eiointc *s = dev->kvm->arch.eiointc;

	data = (void __user *)attr->addr;
	spin_lock_irqsave(&s->lock, flags);
	switch (type) {
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU:
		if (copy_from_user(&s->num_cpu, data, 4))
			ret = -EFAULT;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE:
		if (copy_from_user(&s->features, data, 4))
			ret = -EFAULT;
		if (!(s->features & BIT(EIOINTC_HAS_VIRT_EXTENSION)))
			s->status |= BIT(EIOINTC_ENABLE);
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED:
		eiointc_set_sw_coreisr(s);
		for (i = 0; i < (EIOINTC_IRQS / 4); i++) {
			start_irq = i * 4;
			eiointc_update_sw_coremap(s, start_irq,
					(void *)&s->coremap.reg_u32[i], sizeof(u32), false);
		}
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}

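/*
 * Save/restore access to the emulated register file in 4-byte units for the
 * KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS attribute group.
 */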
static int kvm_eiointc_regs_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write)
{
	int addr, cpuid, offset, ret = 0;
	unsigned long flags;
	void *p = NULL;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	cpuid = addr >> 16;
	addr &= 0xffff;
	data = (void __user *)attr->addr;
	switch (addr) {
	case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
		offset = (addr - EIOINTC_NODETYPE_START) / 4;
		p = &s->nodetype.reg_u32[offset];
		break;
	case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
		offset = (addr - EIOINTC_IPMAP_START) / 4;
		p = &s->ipmap.reg_u32[offset];
		break;
	case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
		offset = (addr - EIOINTC_ENABLE_START) / 4;
		p = &s->enable.reg_u32[offset];
		break;
	case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
		offset = (addr - EIOINTC_BOUNCE_START) / 4;
		p = &s->bounce.reg_u32[offset];
		break;
	case EIOINTC_ISR_START ... EIOINTC_ISR_END:
		offset = (addr - EIOINTC_ISR_START) / 4;
		p = &s->isr.reg_u32[offset];
		break;
	case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
		offset = (addr - EIOINTC_COREISR_START) / 4;
		p = &s->coreisr.reg_u32[cpuid][offset];
		break;
	case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
		offset = (addr - EIOINTC_COREMAP_START) / 4;
		p = &s->coremap.reg_u32[offset];
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&s->lock, flags);
	if (is_write) {
		if (copy_from_user(p, data, 4))
			ret = -EFAULT;
	} else {
		if (copy_to_user(data, p, 4))
			ret = -EFAULT;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}

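/*
 * Save/restore access to the software-only state (num_cpu, features, status)
 * for the KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS attribute group.
 */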
static int kvm_eiointc_sw_status_access(struct kvm_device *dev,
				struct kvm_device_attr *attr,
				bool is_write)
{
	int addr, ret = 0;
	unsigned long flags;
	void *p = NULL;
	void __user *data;
	struct loongarch_eiointc *s;

	s = dev->kvm->arch.eiointc;
	addr = attr->attr;
	data = (void __user *)attr->addr;
	switch (addr) {
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU:
		p = &s->num_cpu;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE:
		p = &s->features;
		break;
	case KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE:
		p = &s->status;
		break;
	default:
		kvm_err("%s: unknown eiointc register, addr = %d\n", __func__, addr);
		return -EINVAL;
	}
	spin_lock_irqsave(&s->lock, flags);
	if (is_write) {
		if (copy_from_user(p, data, 4))
			ret = -EFAULT;
	} else {
		if (copy_to_user(data, p, 4))
			ret = -EFAULT;
	}
	spin_unlock_irqrestore(&s->lock, flags);

	return ret;
}

static int kvm_eiointc_get_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
		return kvm_eiointc_regs_access(dev, attr, false);
	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
		return kvm_eiointc_sw_status_access(dev, attr, false);
	default:
		return -EINVAL;
	}
}

static int kvm_eiointc_set_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL:
		return kvm_eiointc_ctrl_access(dev, attr);
	case KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS:
		return kvm_eiointc_regs_access(dev, attr, true);
	case KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS:
		return kvm_eiointc_sw_status_access(dev, attr, true);
	default:
		return -EINVAL;
	}
}

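/*
 * Device creation registers two IOCSR regions, the main eiointc register
 * window and the virt-extension window; destruction unregisters both.
 */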
static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct loongarch_eiointc *s;
	struct kvm_io_device *device, *device1;
	struct kvm *kvm = dev->kvm;

	/* eiointc has been created */
	if (kvm->arch.eiointc)
		return -EINVAL;

	s = kzalloc(sizeof(struct loongarch_eiointc), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize IOCSR device
	 */
	device = &s->device;
	kvm_iodevice_init(device, &kvm_eiointc_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_BASE, EIOINTC_SIZE, device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kfree(s);
		return ret;
	}

	device1 = &s->device_vext;
	kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
			EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
	if (ret < 0)
		kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kfree(s);
		return ret;
	}
	kvm->arch.eiointc = s;

	return 0;
}

static void kvm_eiointc_destroy(struct kvm_device *dev)
{
	struct kvm *kvm;
	struct loongarch_eiointc *eiointc;

	if (!dev || !dev->kvm || !dev->kvm->arch.eiointc)
		return;

	kvm = dev->kvm;
	eiointc = kvm->arch.eiointc;
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
	kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
	mutex_unlock(&kvm->slots_lock);
	kfree(eiointc);
}

static struct kvm_device_ops kvm_eiointc_dev_ops = {
	.name = "kvm-loongarch-eiointc",
	.create = kvm_eiointc_create,
	.destroy = kvm_eiointc_destroy,
	.set_attr = kvm_eiointc_set_attr,
	.get_attr = kvm_eiointc_get_attr,
};

int kvm_loongarch_register_eiointc_device(void)
{
	return kvm_register_device_ops(&kvm_eiointc_dev_ops, KVM_DEV_TYPE_LOONGARCH_EIOINTC);
}