/*
 * Copyright (c) 2004 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <asm/kvm_para.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>
#define MAX_CPU		32
#define MAX_SRC		256
#define MAX_TMR		4
#define MAX_IPI		4
#define MAX_MSI		8
#define MAX_IRQ		(MAX_SRC + MAX_IPI + MAX_TMR)
#define VID		0x03	/* MPIC version ID */
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT	(1 << 0)
#define OPENPIC_FLAG_ILR	(2 << 0)
/* OpenPIC address map */
#define OPENPIC_REG_SIZE		0x40000
#define OPENPIC_GLB_REG_START		0x0
#define OPENPIC_GLB_REG_SIZE		0x10F0
#define OPENPIC_TMR_REG_START		0x10F0
#define OPENPIC_TMR_REG_SIZE		0x220
#define OPENPIC_MSI_REG_START		0x1600
#define OPENPIC_MSI_REG_SIZE		0x200
#define OPENPIC_SUMMARY_REG_START	0x3800
#define OPENPIC_SUMMARY_REG_SIZE	0x800
#define OPENPIC_SRC_REG_START		0x10000
#define OPENPIC_SRC_REG_SIZE		(MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START		0x20000
#define OPENPIC_CPU_REG_SIZE		(0x100 + ((MAX_CPU - 1) * 0x1000))
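
/*
 * Worked example of the address map (an illustrative sketch; the base
 * address 0xe0040000 is hypothetical): with reg_base at 0xe0040000, a
 * guest store to 0xe0060080 decodes as offset 0x20080, which falls in
 * the per-CPU block (OPENPIC_CPU_REG_START = 0x20000).  openpic_cpu_write()
 * below then selects CPU 0 via (0x80 & 0x1f000) >> 12 and register
 * offset 0x80, i.e. CTPR.
 */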
struct fsl_mpic_info {
	int max_ext;
};

static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};
#define FRR_NIRQ_SHIFT	16
#define FRR_NCPU_SHIFT	8
#define FRR_VID_SHIFT	0

#define VID_REVISION_1_2	2
#define VID_REVISION_1_3	3

#define VIR_GENERIC	0x00000000	/* Generic Vendor ID */

#define GCR_RESET	0x80000000
#define GCR_MODE_PASS	0x00000000
#define GCR_MODE_MIXED	0x20000000
#define GCR_MODE_PROXY	0x60000000

#define TBCR_CI		0x80000000	/* count inhibit */
#define TCCR_TOG	0x80000000	/* toggles when decrementing to zero */
#define IDR_EP_SHIFT	31
#define IDR_EP_MASK	(1 << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT	30
#define IDR_CI1_SHIFT	29
#define IDR_P1_SHIFT	1
#define IDR_P0_SHIFT	0

#define ILR_INTTGT_MASK	0x000000ff
#define ILR_INTTGT_INT	0x00
#define ILR_INTTGT_CINT	0x01	/* critical */
#define ILR_INTTGT_MCP	0x02	/* machine check */
#define NUM_OUTPUTS	3
#define MSIIR_OFFSET	0x140
#define MSIIR_SRS_SHIFT	29
#define MSIIR_SRS_MASK	(0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT	24
#define MSIIR_IBS_MASK	(0x1f << MSIIR_IBS_SHIFT)
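
/*
 * MSIIR decode, worked example (illustrative value only): a write of
 * val = 0x25000000 gives srs = val >> MSIIR_SRS_SHIFT = 1 and
 * ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT = 5, so
 * openpic_msi_write() below sets bit 5 in opp->msi[1].msir and raises
 * source irq_msi + 1.
 */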
static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;

	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx);
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val);
enum irq_type {
	IRQ_TYPE_NORMAL = 0,
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};
struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;
	int priority;
};
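
/*
 * Sizing sketch (assuming the MAX_* values defined above): MAX_IRQ =
 * 256 + 4 + 4 = 264, rounded up to 320 by (MAX_IRQ + 63) & ~63.
 * BITS_TO_LONGS(320) is 5 longs on a 64-bit host and 10 longs on a
 * 32-bit host -- 40 bytes either way, so the bitmap layout is stable
 * across host word sizes.
 */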
struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;		/* critical interrupts ignore mask on some FSL MPICs */
};
#define IVPR_MASK_SHIFT		31
#define IVPR_MASK_MASK		(1 << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT	30
#define IVPR_ACTIVITY_MASK	(1 << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT		29
#define IVPR_MODE_MASK		(1 << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT	23
#define IVPR_POLARITY_MASK	(1 << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT	22
#define IVPR_SENSE_MASK		(1 << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK	(0xF << 16)
#define IVPR_PRIORITY(_ivprr_)	((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_)	((_ivprr_) & (opp)->vector_mask)
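
/*
 * IVPR decode, worked example (illustrative values): with
 * vector_mask = 0xFFFF, an ivpr of 0x000A0042 yields
 * IVPR_PRIORITY(ivpr) = (0x000A0042 & 0x000F0000) >> 16 = 10 and
 * IVPR_VECTOR(opp, ivpr) = 0x0042; the MASK, ACTIVITY and MODE bits
 * occupy the top three bit positions defined above.
 */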
/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP	0x80000000	/* external pin */
#define IDR_CI	0x40000000	/* critical interrupt */
struct irq_dest {
	struct kvm_vcpu *vcpu;

	int32_t ctpr;	/* CPU current task priority */
	struct irq_queue raised;
	struct irq_queue servicing;

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};
#define MAX_MMIO_REGIONS	10
struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;

	gpa_t reg_base;
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;
	uint32_t flags;
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */
	/* Source registers */
	struct irq_source src[MAX_IRQ];
	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;
	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];
	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;
	uint32_t irq_ipi0;
	uint32_t irq_tim0;
	uint32_t irq_msi;
};
static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}
static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}
static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
			irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;
	q->priority = priority;
}
static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		__func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			__func__, src->output, n_IRQ, active, was_active,
			dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			__func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				__func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				__func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->raised.next,
				dst->raised.priority, dst->ctpr,
				dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->ctpr,
				dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
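
/*
 * Distributed delivery, worked example (hypothetical state): with
 * destmask = 0x6 (CPUs 1 and 2) and last_cpu = 1, the loop above starts
 * at i = 2, delivers the IRQ to CPU 2, records last_cpu = 2 and stops;
 * the next distributed delivery wraps around and picks CPU 1.
 */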
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
	struct openpic *opp = opaque;
	struct irq_source *src;

	if (n_IRQ >= MAX_IRQ) {
		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
		return;
	}

	src = &opp->src[n_IRQ];
	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
		n_IRQ, level, src->ivpr);
	if (src->level) {
		/* level-sensitive irq */
		src->pending = level;
		openpic_update_irq(opp, n_IRQ);
	} else {
		/* edge-sensitive irq */
		if (level) {
			src->pending = 1;
			openpic_update_irq(opp, n_IRQ);
		}

		if (src->output != ILR_INTTGT_INT) {
			/* Edge-triggered interrupts shouldn't be used
			 * with non-INT delivery, but just in case,
			 * try to make it do something sane rather than
			 * cause an interrupt storm.  This is close to
			 * what you'd probably see happen in real hardware.
			 */
			src->pending = 0;
			openpic_update_irq(opp, n_IRQ);
		}
	}
}
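
/*
 * Usage sketch: callers pulse an edge-sensitive source by raising and
 * immediately clearing the line, as the IPIDR handling in
 * openpic_cpu_write_internal() below does:
 *
 *	openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
 *	openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
 *
 * while level-sensitive sources stay pending until the line is lowered.
 */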
static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;

		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}

		write_IRQreg_idr(opp, i, opp->idr_reset);
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}
static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					__func__);
			}

			src->output = ILR_INTTGT_CINT;
			src->nomask = true;
			src->destmask = 0;

			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}
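
/*
 * Critical-destination decode, worked example (hypothetical config):
 * with nb_cpus = 2, normal_mask = 0x3 and crit_shift = 29, so
 * crit_mask = 0x60000000 -- exactly the CI0 (bit 30) and CI1 (bit 29)
 * positions.  An IDR of 0x40000000 therefore selects critical delivery
 * (ILR_INTTGT_CINT) with destmask = 0x1, i.e. CPU 0 via CI0.
 */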
static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
			src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}
static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, the sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		opp->src[n_IRQ].ivpr);
}
static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
		return;
	}

	opp->gcr &= ~opp->mpic_mode_mask;
	opp->gcr |= val & opp->mpic_mode_mask;
}
static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int err = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x00:	/* Block Revision Register1 (BRR1) is Readonly */
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;
	case 0x1000:		/* FRR */
		break;
	case 0x1020:		/* GCR */
		openpic_gcr_write(opp, val);
		break;
	case 0x1080:		/* VIR */
		break;
	case 0x1090:		/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		err = -ENXIO;
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;
		idx = (addr - 0x10A0) >> 4;
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
		break;
	}
	case 0x10E0:		/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;
	default:
		break;
	}

	return err;
}
static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	u32 retval;
	int err = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;
	if (addr & 0xF)
		goto out;

	switch (addr) {
	case 0x1000:		/* FRR */
		retval = opp->frr;
		retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
		break;
	case 0x1020:		/* GCR */
		retval = opp->gcr;
		break;
	case 0x1080:		/* VIR */
		retval = opp->vir;
		break;
	case 0x1090:		/* PIR */
		retval = 0x00000000;
		break;
	case 0x00:		/* Block Revision Register1 (BRR1) */
		retval = opp->brr1;
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_read_internal(opp, addr,
			&retval, get_current_cpu());
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;
		idx = (addr - 0x10A0) >> 4;
		retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
		break;
	}
	case 0x10E0:		/* SPVE */
		retval = opp->spve;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return err;
}
static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	addr += 0x10f0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == 0x10f0) {
		/* TFRR */
		opp->tfrr = val;
		return 0;
	}

	idx = (addr >> 6) & 0x3;

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		break;
	case 0x10:		/* TBCR */
		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
		    (val & TBCR_CI) == 0 &&
		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
			opp->timers[idx].tccr &= ~TCCR_TOG;

		opp->timers[idx].tbcr = val;
		break;
	case 0x20:		/* TVPR */
		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
		break;
	case 0x30:		/* TDR */
		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
		break;
	}

	return 0;
}
static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval = -1;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto out;

	idx = (addr >> 6) & 0x3;
	if (addr == 0x0) {
		/* TFRR */
		retval = opp->tfrr;
		goto out;
	}

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		retval = opp->timers[idx].tccr;
		break;
	case 0x10:		/* TBCR */
		retval = opp->timers[idx].tbcr;
		break;
	case 0x20:		/* TIPV */
		retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
		break;
	case 0x30:		/* TIDE (TIDR) */
		retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return 0;
}
static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		write_IRQreg_ivpr(opp, idx, val);
		break;
	case 0x10:
		write_IRQreg_idr(opp, idx, val);
		break;
	case 0x18:
		write_IRQreg_ilr(opp, idx, val);
		break;
	}

	return 0;
}
static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		retval = read_IRQreg_ivpr(opp, idx);
		break;
	case 0x10:
		retval = read_IRQreg_idr(opp, idx);
		break;
	case 0x18:
		retval = read_IRQreg_ilr(opp, idx);
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return 0;
}
static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx = opp->irq_msi;
	int srs, ibs;

	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case MSIIR_OFFSET:
		srs = val >> MSIIR_SRS_SHIFT;
		idx += srs;
		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
		opp->msi[srs].msir |= 1 << ibs;
		openpic_set_irq(opp, idx, 1);
		break;
	default:
		/* most registers are read-only, thus ignored */
		break;
	}

	return 0;
}
static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0;
	int i, srs;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		return -ENXIO;

	srs = addr >> 4;

	switch (addr) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:		/* MSIRs */
		r = opp->msi[srs].msir;
		/* Clear on read */
		opp->msi[srs].msir = 0;
		openpic_set_irq(opp, opp->irq_msi + srs, 0);
		break;
	case 0x120:		/* MSISR */
		for (i = 0; i < MAX_MSI; i++)
			r |= (opp->msi[i].msir ? 1 : 0) << i;
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);

	*ptr = r;
	return 0;
}
static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
{
	uint32_t r = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);

	/* TODO: EISR/EIMR */
	*ptr = r;
	return 0;
}

static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
{
	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);

	/* TODO: EISR/EIMR */
	return 0;
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx)
{
	struct openpic *opp = opaque;
	struct irq_source *src;
	struct irq_dest *dst;
	int s_IRQ, n_IRQ;

	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
		addr, val);

	if (idx < 0)
		return 0;

	if (addr & 0xF)
		return 0;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x40:		/* IPIDR */
	case 0x50:
	case 0x60:
	case 0x70:
		idx = (addr - 0x40) >> 4;
		/* we use IDE as mask which CPUs to deliver the IPI to still. */
		opp->src[opp->irq_ipi0 + idx].destmask |= val;
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
		break;
	case 0x80:		/* CTPR */
		dst->ctpr = val & 0x0000000F;

		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
			__func__, idx, dst->ctpr, dst->raised.priority,
			dst->servicing.priority);

		if (dst->raised.priority <= dst->ctpr) {
			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
				__func__, idx);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		} else if (dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
				__func__, idx, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		break;
	case 0x90:		/* WHOAMI */
		/* Read-only register */
		break;
	case 0xA0:		/* IACK */
		/* Read-only register */
		break;
	case 0xB0: {		/* EOI */
		int notify_eoi;

		pr_debug("EOI\n");
		s_IRQ = IRQ_get_next(opp, &dst->servicing);

		if (s_IRQ < 0) {
			pr_debug("%s: EOI with no interrupt in service\n",
				__func__);
			break;
		}

		IRQ_resetbit(&dst->servicing, s_IRQ);
		/* Notify listeners that the IRQ is over */
		notify_eoi = s_IRQ;
		/* Set up next servicing IRQ */
		s_IRQ = IRQ_get_next(opp, &dst->servicing);
		/* Check queued interrupts. */
		n_IRQ = IRQ_get_next(opp, &dst->raised);
		src = &opp->src[n_IRQ];
		if (n_IRQ != -1 &&
		    (s_IRQ == -1 ||
		     IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
			pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
				idx, n_IRQ);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		spin_unlock(&opp->lock);
		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
		spin_lock(&opp->lock);

		break;
	}
	default:
		break;
	}

	return 0;
}
static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;

	return openpic_cpu_write_internal(opp, addr, val,
					 (addr & 0x1f000) >> 12);
}
static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
			     int cpu)
{
	struct irq_source *src;
	int retval, irq;

	pr_debug("Lower OpenPIC INT output\n");
	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);

	irq = IRQ_get_next(opp, &dst->raised);
	pr_debug("IACK: irq=%d\n", irq);

	if (irq == -1)
		/* No more interrupt pending */
		return opp->spve;

	src = &opp->src[irq];
	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
			__func__, irq, dst->ctpr, src->ivpr);
		openpic_update_irq(opp, irq);
		retval = opp->spve;
	} else {
		/* IRQ enter servicing state */
		IRQ_setbit(&dst->servicing, irq);
		retval = IVPR_VECTOR(opp, src->ivpr);
	}

	if (!src->level) {
		/* edge-sensitive IRQ */
		src->ivpr &= ~IVPR_ACTIVITY_MASK;
		src->pending = 0;
		IRQ_resetbit(&dst->raised, irq);
	}

	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
		src->destmask &= ~(1 << cpu);
		if (src->destmask && !src->level) {
			/* trigger on CPUs that didn't know about it yet */
			openpic_set_irq(opp, irq, 1);
			openpic_set_irq(opp, irq, 0);
			/* if all CPUs knew about it, set active bit again */
			src->ivpr |= IVPR_ACTIVITY_MASK;
		}
	}

	return retval;
}
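
/*
 * Lifecycle sketch: a guest (or the proxy path in kvmppc_mpic_set_epr()
 * below) acknowledges an interrupt by reading IACK, which returns
 * IVPR_VECTOR() of the highest-priority raised source and moves it to
 * the servicing queue; a later EOI write at offset 0xB0 in
 * openpic_cpu_write_internal() retires it and re-evaluates the
 * remaining raised interrupts.
 */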
void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
	struct openpic *opp = vcpu->arch.mpic;
	int cpu = vcpu->arch.irq_cpu_id;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));

	spin_unlock_irqrestore(&opp->lock, flags);
}
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx)
{
	struct openpic *opp = opaque;
	struct irq_dest *dst;
	uint32_t retval;

	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
	retval = 0xFFFFFFFF;

	if (idx < 0)
		goto out;

	if (addr & 0xF)
		goto out;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x80:		/* CTPR */
		retval = dst->ctpr;
		break;
	case 0x90:		/* WHOAMI */
		retval = idx;
		break;
	case 0xA0:		/* IACK */
		retval = openpic_iack(opp, dst, idx);
		break;
	case 0xB0:		/* EOI */
		retval = 0;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return 0;
}
static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;

	return openpic_cpu_read_internal(opp, addr, ptr,
					 (addr & 0x1f000) >> 12);
}
struct mem_reg {
	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
	int (*write)(void *opaque, gpa_t addr, u32 val);
	gpa_t start_addr;
	int size;
};
static const struct mem_reg openpic_gbl_mmio = {
	.write = openpic_gbl_write,
	.read = openpic_gbl_read,
	.start_addr = OPENPIC_GLB_REG_START,
	.size = OPENPIC_GLB_REG_SIZE,
};

static const struct mem_reg openpic_tmr_mmio = {
	.write = openpic_tmr_write,
	.read = openpic_tmr_read,
	.start_addr = OPENPIC_TMR_REG_START,
	.size = OPENPIC_TMR_REG_SIZE,
};

static const struct mem_reg openpic_cpu_mmio = {
	.write = openpic_cpu_write,
	.read = openpic_cpu_read,
	.start_addr = OPENPIC_CPU_REG_START,
	.size = OPENPIC_CPU_REG_SIZE,
};

static const struct mem_reg openpic_src_mmio = {
	.write = openpic_src_write,
	.read = openpic_src_read,
	.start_addr = OPENPIC_SRC_REG_START,
	.size = OPENPIC_SRC_REG_SIZE,
};

static const struct mem_reg openpic_msi_mmio = {
	.read = openpic_msi_read,
	.write = openpic_msi_write,
	.start_addr = OPENPIC_MSI_REG_START,
	.size = OPENPIC_MSI_REG_SIZE,
};

static const struct mem_reg openpic_summary_mmio = {
	.read = openpic_summary_read,
	.write = openpic_summary_write,
	.start_addr = OPENPIC_SUMMARY_REG_START,
	.size = OPENPIC_SUMMARY_REG_SIZE,
};
static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
{
	if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
		WARN(1, "kvm mpic: too many mmio regions\n");
		return;
	}

	opp->mmio_regions[opp->num_mmio_regions++] = mr;
}
static void fsl_common_init(struct openpic *opp)
{
	int i;
	int virq = MAX_SRC;

	add_mmio_region(opp, &openpic_msi_mmio);
	add_mmio_region(opp, &openpic_summary_mmio);

	opp->vid = VID_REVISION_1_2;
	opp->vir = VIR_GENERIC;
	opp->vector_mask = 0xFFFF;
	opp->tfrr_reset = 0;
	opp->ivpr_reset = IVPR_MASK_MASK;
	opp->idr_reset = 1 << 0;
	opp->max_irq = MAX_IRQ;

	opp->irq_ipi0 = virq;
	virq += MAX_IPI;
	opp->irq_tim0 = virq;
	virq += MAX_TMR;

	BUG_ON(virq > MAX_IRQ);

	opp->irq_msi = 224;

	for (i = 0; i < opp->fsl->max_ext; i++)
		opp->src[i].level = false;

	/* Internal interrupts, including message and MSI */
	for (i = 16; i < MAX_SRC; i++) {
		opp->src[i].type = IRQ_TYPE_FSLINT;
		opp->src[i].level = true;
	}

	/* timers and IPIs */
	for (i = MAX_SRC; i < virq; i++) {
		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
		opp->src[i].level = false;
	}
}
static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->read(opp, addr - mr->start_addr, ptr);
	}

	return -ENXIO;
}
static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->write(opp, addr - mr->start_addr, val);
	}

	return -ENXIO;
}
static int kvm_mpic_read(struct kvm_vcpu *vcpu,
			 struct kvm_io_device *this,
			 gpa_t addr, int len, void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;
	union {
		u32 val;
		u8 bytes[4];
	} u;

	if (addr & (len - 1)) {
		pr_debug("%s: bad alignment %llx/%d\n",
			 __func__, addr, len);
		return -EINVAL;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
	spin_unlock_irq(&opp->lock);

	/*
	 * Technically only 32-bit accesses are allowed, but be nice to
	 * people dumping registers a byte at a time -- it works in real
	 * hardware (reads only, not writes).
	 */
	if (len == 4) {
		*(u32 *)ptr = u.val;
		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
			 __func__, addr, ret, u.val);
	} else if (len == 1) {
		*(u8 *)ptr = u.bytes[addr & 3];
		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
			 __func__, addr, ret, u.bytes[addr & 3]);
	} else {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EINVAL;
	}

	return ret;
}
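
/*
 * Byte-lane example (illustrative values): after a 32-bit register
 * reads as u.val = 0x12345678, a len == 1 access at (addr & 3) == 0
 * returns u.bytes[0] -- 0x12 on a big-endian PPC host -- mirroring how
 * real hardware presents sub-word reads; writes get no such treatment.
 */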
static int kvm_mpic_write(struct kvm_vcpu *vcpu,
			  struct kvm_io_device *this,
			  gpa_t addr, int len, const void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;

	if (len != 4) {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EOPNOTSUPP;
	}
	if (addr & 3) {
		pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
		return -EOPNOTSUPP;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
				      *(const u32 *)ptr);
	spin_unlock_irq(&opp->lock);

	pr_debug("%s: addr %llx ret %d val %x\n",
		 __func__, addr, ret, *(const u32 *)ptr);

	return ret;
}
static const struct kvm_io_device_ops mpic_mmio_ops = {
	.read = kvm_mpic_read,
	.write = kvm_mpic_write,
};
static void map_mmio(struct openpic *opp)
{
	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
				opp->reg_base, OPENPIC_REG_SIZE,
				&opp->mmio);
}

static void unmap_mmio(struct openpic *opp)
{
	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
}
static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
{
	u64 base;

	if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
		return -EFAULT;

	if (base & 0x3ffff) {
		pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
			 __func__, base);
		return -EINVAL;
	}

	if (base == opp->reg_base)
		return 0;

	mutex_lock(&opp->kvm->slots_lock);

	unmap_mmio(opp);
	opp->reg_base = base;

	pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
		 __func__, base);

	if (base == 0)
		goto out;

	map_mmio(opp);

out:
	mutex_unlock(&opp->kvm->slots_lock);
	return 0;
}
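
/*
 * Userspace sketch (hypothetical fd variables; error handling omitted):
 * the base address is programmed through the device-attr API, e.g.
 *
 *	u64 base = 0xe0040000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_MPIC_GRP_MISC,
 *		.attr = KVM_DEV_MPIC_BASE_ADDR,
 *		.addr = (u64)(unsigned long)&base,
 *	};
 *	ioctl(mpic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which lands in set_base_addr() above; a base of 0 unmaps the MMIO
 * region.
 */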
#define ATTR_SET	0
#define ATTR_GET	1

static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
{
	int ret;

	if (addr & 3)
		return -ENXIO;

	spin_lock_irq(&opp->lock);

	if (type == ATTR_SET)
		ret = kvm_mpic_write_internal(opp, addr, *val);
	else
		ret = kvm_mpic_read_internal(opp, addr, val);

	spin_unlock_irq(&opp->lock);

	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);

	return ret;
}
static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u32 attr32;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return set_base_addr(opp, attr);
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return access_reg(opp, attr->attr, &attr32, ATTR_SET);

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		if (attr32 != 0 && attr32 != 1)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		openpic_set_irq(opp, attr->attr, attr32);
		spin_unlock_irq(&opp->lock);
		return 0;
	}

	return -ENXIO;
}
static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u64 attr64;
	u32 attr32;
	int ret;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			mutex_lock(&opp->kvm->slots_lock);
			attr64 = opp->reg_base;
			mutex_unlock(&opp->kvm->slots_lock);

			if (copy_to_user((u64 __user *)(long)attr->addr,
					 &attr64, sizeof(u64)))
				return -EFAULT;

			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
		if (ret)
			return ret;

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		attr32 = opp->src[attr->attr].pending;
		spin_unlock_irq(&opp->lock);

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;
	}

	return -ENXIO;
}
static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		return 0;
	}

	return -ENXIO;
}
static void mpic_destroy(struct kvm_device *dev)
{
	struct openpic *opp = dev->private;

	dev->kvm->arch.mpic = NULL;
	kfree(opp);
	kfree(dev);
}
static int mpic_set_default_irq_routing(struct openpic *opp)
{
	struct kvm_irq_routing_entry *routing;

	/* Create a nop default map, so that dereferencing it still works */
	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
	if (!routing)
		return -ENOMEM;

	kvm_set_irq_routing(opp->kvm, routing, 0, 0);

	kfree(routing);
	return 0;
}
static int mpic_create(struct kvm_device *dev, u32 type)
{
	struct openpic *opp;
	int ret;

	/* We only support one MPIC at a time for now */
	if (dev->kvm->arch.mpic)
		return -EINVAL;

	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
	if (!opp)
		return -ENOMEM;

	dev->private = opp;
	opp->kvm = dev->kvm;
	opp->dev = dev;
	opp->model = type;
	spin_lock_init(&opp->lock);

	add_mmio_region(opp, &openpic_gbl_mmio);
	add_mmio_region(opp, &openpic_tmr_mmio);
	add_mmio_region(opp, &openpic_src_mmio);
	add_mmio_region(opp, &openpic_cpu_mmio);

	switch (opp->model) {
	case KVM_DEV_TYPE_FSL_MPIC_20:
		opp->fsl = &fsl_mpic_20;
		opp->brr1 = 0x00400200;
		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
		opp->nb_irqs = 80;
		opp->mpic_mode_mask = GCR_MODE_MIXED;

		fsl_common_init(opp);

		break;

	case KVM_DEV_TYPE_FSL_MPIC_42:
		opp->fsl = &fsl_mpic_42;
		opp->brr1 = 0x00400402;
		opp->flags |= OPENPIC_FLAG_ILR;
		opp->nb_irqs = 196;
		opp->mpic_mode_mask = GCR_MODE_PROXY;

		fsl_common_init(opp);

		break;

	default:
		ret = -ENODEV;
		goto err;
	}

	ret = mpic_set_default_irq_routing(opp);
	if (ret)
		goto err;

	openpic_reset(opp);

	smp_wmb();
	dev->kvm->arch.mpic = opp;

	return 0;

err:
	kfree(opp);
	return ret;
}
struct kvm_device_ops kvm_mpic_ops = {
	.name = "kvm-mpic",
	.create = mpic_create,
	.destroy = mpic_destroy,
	.set_attr = mpic_set_attr,
	.get_attr = mpic_get_attr,
	.has_attr = mpic_has_attr,
};
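
/*
 * Userspace sketch (hypothetical vm_fd; error handling omitted): the
 * in-kernel MPIC is instantiated through the generic KVM device API,
 * e.g.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FSL_MPIC_42,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 * which reaches mpic_create() above; cd.fd is then used with the
 * KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR calls shown earlier.
 */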
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu)
{
	struct openpic *opp = dev->private;
	int ret = 0;

	if (dev->ops != &kvm_mpic_ops)
		return -EPERM;
	if (opp->kvm != vcpu->kvm)
		return -EPERM;
	if (cpu < 0 || cpu >= MAX_CPU)
		return -EPERM;

	spin_lock_irq(&opp->lock);

	if (opp->dst[cpu].vcpu) {
		ret = -EEXIST;
		goto out;
	}
	if (vcpu->arch.irq_type) {
		ret = -EBUSY;
		goto out;
	}

	opp->dst[cpu].vcpu = vcpu;
	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

	vcpu->arch.mpic = opp;
	vcpu->arch.irq_cpu_id = cpu;
	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

	/* This might need to be changed if GCR gets extended */
	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
	spin_unlock_irq(&opp->lock);
	return ret;
}
/*
 * This should only happen immediately before the mpic is destroyed,
 * so we shouldn't need to worry about anything still trying to
 * access the vcpu pointer.
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
}
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	u32 irq = e->irqchip.pin;
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_set_irq(opp, irq, level);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	/*
	 * XXX We ignore the target address for now, as we only support
	 *     a single MSI bank.
	 */
	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = mpic_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;