/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>
#define MAX_CPU		32
#define MAX_SRC		256
#define MAX_TMR		4
#define MAX_IPI		4
#define MAX_MSI		8
#define MAX_IRQ		(MAX_SRC + MAX_IPI + MAX_TMR)
#define VID		0x03	/* MPIC version ID */
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT	(1 << 0)
#define OPENPIC_FLAG_ILR	(2 << 0)
/* OpenPIC address map */
#define OPENPIC_REG_SIZE		0x40000
#define OPENPIC_GLB_REG_START		0x0
#define OPENPIC_GLB_REG_SIZE		0x10F0
#define OPENPIC_TMR_REG_START		0x10F0
#define OPENPIC_TMR_REG_SIZE		0x220
#define OPENPIC_MSI_REG_START		0x1600
#define OPENPIC_MSI_REG_SIZE		0x200
#define OPENPIC_SUMMARY_REG_START	0x3800
#define OPENPIC_SUMMARY_REG_SIZE	0x800
#define OPENPIC_SRC_REG_START		0x10000
#define OPENPIC_SRC_REG_SIZE		(MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START		0x20000
#define OPENPIC_CPU_REG_SIZE		(0x100 + ((MAX_CPU - 1) * 0x1000))
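/*
 * Layout sketch, derived purely from the defines above: the emulated
 * register window spans OPENPIC_REG_SIZE = 0x40000 bytes (256 KiB).
 * Each interrupt source occupies a 0x20-byte stride, so MAX_SRC = 256
 * sources cover 256 * 0x20 = 0x2000 bytes starting at offset 0x10000.
 * The per-CPU region size evaluates to 0x100 + (32 - 1) * 0x1000 =
 * 0x1F100 bytes starting at offset 0x20000, i.e. one 4 KiB bank per CPU.
 */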
struct fsl_mpic_info {
	int max_ext;
};

static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};
#define FRR_NIRQ_SHIFT	16
#define FRR_NCPU_SHIFT	8
#define FRR_VID_SHIFT	0

#define VID_REVISION_1_2	2
#define VID_REVISION_1_3	3

#define VIR_GENERIC	0x00000000	/* Generic Vendor ID */

#define GCR_RESET	0x80000000
#define GCR_MODE_PASS	0x00000000
#define GCR_MODE_MIXED	0x20000000
#define GCR_MODE_PROXY	0x60000000

#define TBCR_CI		0x80000000	/* count inhibit */
#define TCCR_TOG	0x80000000	/* toggles when decrement to zero */

#define IDR_EP_SHIFT	31
#define IDR_EP_MASK	(1 << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT	30
#define IDR_CI1_SHIFT	29
#define IDR_P1_SHIFT	1
#define IDR_P0_SHIFT	0

#define ILR_INTTGT_MASK	0x000000ff
#define ILR_INTTGT_INT	0x00
#define ILR_INTTGT_CINT	0x01	/* critical */
#define ILR_INTTGT_MCP	0x02	/* machine check */
#define NUM_OUTPUTS	3

#define MSIIR_OFFSET	0x140
#define MSIIR_SRS_SHIFT	29
#define MSIIR_SRS_MASK	(0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT	24
#define MSIIR_IBS_MASK	(0x1f << MSIIR_IBS_SHIFT)
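/*
 * Worked example (illustrative value only): an MSIIR write of
 * val = (2 << MSIIR_SRS_SHIFT) | (5 << MSIIR_IBS_SHIFT) selects shared
 * register set srs = 2 and message bit ibs = 5, so openpic_msi_write()
 * below sets bit 5 in opp->msi[2].msir and pulses interrupt number
 * opp->irq_msi + 2.
 */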
static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx);
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val);
enum irq_type {
	IRQ_TYPE_NORMAL = 0,
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};
struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;
	int priority;
};
struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;	/* critical interrupts ignore mask on some FSL MPICs */
};
#define IVPR_MASK_SHIFT		31
#define IVPR_MASK_MASK		(1 << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT	30
#define IVPR_ACTIVITY_MASK	(1 << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT		29
#define IVPR_MODE_MASK		(1 << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT	23
#define IVPR_POLARITY_MASK	(1 << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT	22
#define IVPR_SENSE_MASK		(1 << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK	(0xF << 16)
#define IVPR_PRIORITY(_ivprr_)	((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_)	((_ivprr_) & (opp)->vector_mask)
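/*
 * Worked example (illustrative value only): with vector_mask = 0xFFFF,
 * an IVPR of 0x80030041 decodes as MASK = 1 (source disabled),
 * ACTIVITY = 0, priority = IVPR_PRIORITY(0x80030041) = 3, and
 * vector = IVPR_VECTOR(opp, 0x80030041) = 0x41.
 */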
/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP	0x80000000	/* external pin */
#define IDR_CI	0x40000000	/* critical interrupt */
struct irq_dest {
	struct kvm_vcpu *vcpu;

	int32_t ctpr;		/* CPU current task priority */
	struct irq_queue raised;
	struct irq_queue servicing;

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};
#define MAX_MMIO_REGIONS 10

struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;

	gpa_t reg_base;
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;
	uint32_t flags;
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */
	/* Source registers */
	struct irq_source src[MAX_IRQ];
	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;
	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];
	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;
	uint32_t irq_ipi0;
	uint32_t irq_tim0;
	uint32_t irq_msi;
};
static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__,
		 dst->vcpu->arch.irq_cpu_id, output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}
static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__,
		 dst->vcpu->arch.irq_cpu_id, output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}
static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
			 irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;
	q->priority = priority;
}
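/*
 * Reading of the loop above: IRQ_check() scans every bit set in the
 * queue and keeps the source with the strictly highest IVPR priority;
 * on a tie the lowest-numbered source wins, because later equal-priority
 * sources fail the '>' test.  q->next is -1 when the queue is empty.
 */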
static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		 __func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			 __func__, src->output, n_IRQ, active, was_active,
			 dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					 __func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					 __func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			 __func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				 __func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				 __func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				 __func__, n_IRQ, dst->raised.next,
				 dst->raised.priority, dst->ctpr,
				 dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				 __func__, n_IRQ, dst->ctpr,
				 dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
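/*
 * Worked example of the gating above (illustrative values only): if a
 * source with IVPR priority 4 goes active while dst->ctpr is 7, its bit
 * stays set in dst->raised but no INT output is raised.  If the guest
 * later writes CTPR down to 3, the CTPR handler in
 * openpic_cpu_write_internal() re-evaluates dst->raised and raises the
 * INT output at that point.
 */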
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
	struct openpic *opp = opaque;
	struct irq_source *src;

	if (n_IRQ >= MAX_IRQ) {
		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
		return;
	}

	src = &opp->src[n_IRQ];
	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
		 n_IRQ, level, src->ivpr);
	if (src->level) {
		/* level-sensitive irq */
		src->pending = level;
		openpic_update_irq(opp, n_IRQ);
	} else {
		/* edge-sensitive irq */
		if (level) {
			src->pending = 1;
			openpic_update_irq(opp, n_IRQ);
		}

		if (src->output != ILR_INTTGT_INT) {
			/* Edge-triggered interrupts shouldn't be used
			 * with non-INT delivery, but just in case,
			 * try to make it do something sane rather than
			 * cause an interrupt storm.  This is close to
			 * what you'd probably see happen in real hardware.
			 */
			src->pending = 0;
			openpic_update_irq(opp, n_IRQ);
		}
	}
}
static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;

		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}

		write_IRQreg_idr(opp, i, opp->idr_reset);
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}
static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					 __func__);
			}

			src->output = ILR_INTTGT_CINT;
			src->nomask = true;
			src->destmask = 0;

			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}
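/*
 * Worked example (illustrative values only): with nb_cpus = 2 and
 * OPENPIC_FLAG_IDR_CRIT set, normal_mask = 0x3 and
 * crit_shift = IDR_EP_SHIFT - 2 = 29, so crit_mask = 0x3 << 29.
 * An IDR write of (1 << 30) then selects CPU 0 as a critical-interrupt
 * target: n_ci = IDR_CI0_SHIFT - 0 = 30 matches the set bit, giving
 * destmask = 0x1 and output = ILR_INTTGT_CINT.
 */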
static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
			 src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}
static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, the sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		 opp->src[n_IRQ].ivpr);
}
static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
		return;
	}

	opp->gcr &= ~opp->mpic_mode_mask;
	opp->gcr |= val & opp->mpic_mode_mask;
}
static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int err = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x00:	/* Block Revision Register1 (BRR1) is Readonly */
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;
	case 0x1000:	/* FRR */
		break;
	case 0x1020:	/* GCR */
		openpic_gcr_write(opp, val);
		break;
	case 0x1080:	/* VIR */
		break;
	case 0x1090:	/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		break;
	case 0x10A0:	/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;

		idx = (addr - 0x10A0) >> 4;
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
		break;
	}
	case 0x10E0:	/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;
	default:
		break;
	}

	return err;
}
static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	u32 retval;
	int err = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;
	if (addr & 0xF)
		goto out;

	switch (addr) {
	case 0x1000:	/* FRR */
		retval = opp->frr;
		retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
		break;
	case 0x1020:	/* GCR */
		retval = opp->gcr;
		break;
	case 0x1080:	/* VIR */
		retval = opp->vir;
		break;
	case 0x1090:	/* PIR */
		retval = 0x00000000;
		break;
	case 0x00:	/* Block Revision Register1 (BRR1) */
		retval = opp->brr1;
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_read_internal(opp, addr,
						&retval, get_current_cpu());
		break;
	case 0x10A0:	/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;

		idx = (addr - 0x10A0) >> 4;
		retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
		break;
	}
	case 0x10E0:	/* SPVE */
		retval = opp->spve;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return err;
}
static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	addr += 0x10f0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == 0x10f0) {
		/* TFRR */
		opp->tfrr = val;
		return 0;
	}

	idx = (addr >> 6) & 0x3;

	switch (addr & 0x30) {
	case 0x00:	/* TCCR */
		break;
	case 0x10:	/* TBCR */
		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
		    (val & TBCR_CI) == 0 &&
		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
			opp->timers[idx].tccr &= ~TCCR_TOG;

		opp->timers[idx].tbcr = val;
		break;
	case 0x20:	/* TVPR */
		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
		break;
	case 0x30:	/* TDR */
		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
		break;
	}

	return 0;
}
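/*
 * Note on the TBCR case above: the TCCR toggle bit is cleared only when
 * a timer leaves the count-inhibited state, i.e. TCCR_TOG was set, the
 * new value clears TBCR_CI, and the old TBCR still had TBCR_CI set.
 * A plain rewrite of the base count with CI unchanged leaves the toggle
 * bit alone.
 */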
static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval = -1;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto out;

	idx = (addr >> 6) & 0x3;
	if (addr == 0x0) {
		/* TFRR */
		retval = opp->tfrr;
		goto out;
	}

	switch (addr & 0x30) {
	case 0x00:	/* TCCR */
		retval = opp->timers[idx].tccr;
		break;
	case 0x10:	/* TBCR */
		retval = opp->timers[idx].tbcr;
		break;
	case 0x20:	/* TIPV */
		retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
		break;
	case 0x30:	/* TIDE (TIDR) */
		retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;

	return 0;
}
static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:	/* IVPR */
		write_IRQreg_ivpr(opp, idx, val);
		break;
	case 0x10:	/* IDR */
		write_IRQreg_idr(opp, idx, val);
		break;
	case 0x18:	/* ILR */
		write_IRQreg_ilr(opp, idx, val);
		break;
	}

	return 0;
}
static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:	/* IVPR */
		retval = read_IRQreg_ivpr(opp, idx);
		break;
	case 0x10:	/* IDR */
		retval = read_IRQreg_idr(opp, idx);
		break;
	case 0x18:	/* ILR */
		retval = read_IRQreg_ilr(opp, idx);
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;

	return 0;
}
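/*
 * Address decode sketch for the two functions above: each source owns a
 * 0x20-byte block, so source n's IVPR lives at region offset n * 0x20,
 * its IDR at n * 0x20 + 0x10, and (when OPENPIC_FLAG_ILR is set) its
 * ILR at n * 0x20 + 0x18.  E.g. offset 0x0430 decodes to idx = 0x21
 * (source 33), register offset 0x10 = IDR.
 */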
static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx = opp->irq_msi;
	int srs, ibs;

	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case MSIIR_OFFSET:
		srs = val >> MSIIR_SRS_SHIFT;
		idx += srs;
		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
		opp->msi[srs].msir |= 1 << ibs;
		openpic_set_irq(opp, idx, 1);
		break;
	default:
		/* most registers are read-only, thus ignored */
		break;
	}

	return 0;
}
static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0;
	int i, srs;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		return -ENXIO;

	srs = addr >> 4;

	switch (addr) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:	/* MSIRs */
		r = opp->msi[srs].msir;
		/* Clear on read */
		opp->msi[srs].msir = 0;
		openpic_set_irq(opp, opp->irq_msi + srs, 0);
		break;
	case 0x120:	/* MSISR */
		for (i = 0; i < MAX_MSI; i++)
			r |= (opp->msi[i].msir ? 1 : 0) << i;
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;

	return 0;
}
static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
{
	uint32_t r = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);

	/* TODO: EISR/EIMR */

	*ptr = r;
	return 0;
}

static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
{
	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);

	/* TODO: EISR/EIMR */
	return 0;
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx)
{
	struct openpic *opp = opaque;
	struct irq_source *src;
	struct irq_dest *dst;
	int s_IRQ, n_IRQ;

	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
		 addr, val);

	if (idx < 0)
		return 0;

	if (addr & 0xF)
		return 0;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x40:	/* IPIDR */
	case 0x50:
	case 0x60:
	case 0x70:
		idx = (addr - 0x40) >> 4;
		/* we use IDE as mask which CPUs to deliver the IPI to still. */
		opp->src[opp->irq_ipi0 + idx].destmask |= val;
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
		break;
	case 0x80:	/* CTPR */
		dst->ctpr = val & 0x0000000F;

		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
			 __func__, idx, dst->ctpr, dst->raised.priority,
			 dst->servicing.priority);

		if (dst->raised.priority <= dst->ctpr) {
			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
				 __func__, idx);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		} else if (dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
				 __func__, idx, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		break;
	case 0x90:	/* WHOAMI */
		/* Read-only register */
		break;
	case 0xA0:	/* IACK */
		/* Read-only register */
		break;
	case 0xB0: {	/* EOI */
		int notify_eoi;

		pr_debug("EOI\n");
		s_IRQ = IRQ_get_next(opp, &dst->servicing);

		if (s_IRQ < 0) {
			pr_debug("%s: EOI with no interrupt in service\n",
				 __func__);
			break;
		}

		IRQ_resetbit(&dst->servicing, s_IRQ);
		/* Notify listeners that the IRQ is over */
		notify_eoi = s_IRQ;
		/* Set up next servicing IRQ */
		s_IRQ = IRQ_get_next(opp, &dst->servicing);
		/* Check queued interrupts. */
		n_IRQ = IRQ_get_next(opp, &dst->raised);
		src = &opp->src[n_IRQ];
		if (n_IRQ != -1 &&
		    (s_IRQ == -1 ||
		     IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
			pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
				 idx, n_IRQ);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		spin_unlock(&opp->lock);
		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
		spin_lock(&opp->lock);

		break;
	}
	default:
		break;
	}

	return 0;
}
static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;

	return openpic_cpu_write_internal(opp, addr, val,
					  (addr & 0x1f000) >> 12);
}
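/*
 * Per-CPU bank decode used above and in openpic_cpu_read(): each CPU's
 * private registers sit in a 4 KiB page inside the region starting at
 * OPENPIC_CPU_REG_START, so bits 16:12 of the region offset select the
 * CPU.  E.g. an access at offset 0x3080 targets CPU 3's CTPR
 * (0x3080 & 0x1f000 yields 3; the low bits 0x080 select CTPR).
 */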
static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
			     int cpu)
{
	struct irq_source *src;
	int retval, irq;

	pr_debug("Lower OpenPIC INT output\n");
	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);

	irq = IRQ_get_next(opp, &dst->raised);
	pr_debug("IACK: irq=%d\n", irq);

	if (irq == -1)
		/* No more interrupt pending */
		return opp->spve;

	src = &opp->src[irq];
	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
		       __func__, irq, dst->ctpr, src->ivpr);
		openpic_update_irq(opp, irq);
		retval = opp->spve;
	} else {
		/* IRQ enter servicing state */
		IRQ_setbit(&dst->servicing, irq);
		retval = IVPR_VECTOR(opp, src->ivpr);
	}

	if (!src->level) {
		/* edge-sensitive IRQ */
		src->ivpr &= ~IVPR_ACTIVITY_MASK;
		src->pending = 0;
		IRQ_resetbit(&dst->raised, irq);
	}

	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
		src->destmask &= ~(1 << cpu);
		if (src->destmask && !src->level) {
			/* trigger on CPUs that didn't know about it yet */
			openpic_set_irq(opp, irq, 1);
			openpic_set_irq(opp, irq, 0);
			/* if all CPUs knew about it, set active bit again */
			src->ivpr |= IVPR_ACTIVITY_MASK;
		}
	}

	return retval;
}
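/*
 * IPI multicast sketch for the block above: destmask tracks which CPUs
 * still owe an acknowledge.  When CPU 'cpu' IACKs an IPI, its bit is
 * cleared; if other bits remain, the (edge-triggered) IPI is re-pulsed
 * so the remaining CPUs see it, and the ACTIVITY bit is restored since
 * the interrupt is still logically in flight.
 */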
void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
	struct openpic *opp = vcpu->arch.mpic;
	int cpu = vcpu->arch.irq_cpu_id;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));

	spin_unlock_irqrestore(&opp->lock, flags);
}
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx)
{
	struct openpic *opp = opaque;
	struct irq_dest *dst;
	uint32_t retval;

	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
	retval = 0xFFFFFFFF;

	if (idx < 0)
		goto out;

	if (addr & 0xF)
		goto out;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x80:	/* CTPR */
		retval = dst->ctpr;
		break;
	case 0x90:	/* WHOAMI */
		retval = idx;
		break;
	case 0xA0:	/* IACK */
		retval = openpic_iack(opp, dst, idx);
		break;
	case 0xB0:	/* EOI */
		retval = 0;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return 0;
}
static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;

	return openpic_cpu_read_internal(opp, addr, ptr,
					 (addr & 0x1f000) >> 12);
}
struct mem_reg {
	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
	int (*write)(void *opaque, gpa_t addr, u32 val);
	gpa_t start_addr;
	int size;
};
static const struct mem_reg openpic_gbl_mmio = {
	.write = openpic_gbl_write,
	.read = openpic_gbl_read,
	.start_addr = OPENPIC_GLB_REG_START,
	.size = OPENPIC_GLB_REG_SIZE,
};

static const struct mem_reg openpic_tmr_mmio = {
	.write = openpic_tmr_write,
	.read = openpic_tmr_read,
	.start_addr = OPENPIC_TMR_REG_START,
	.size = OPENPIC_TMR_REG_SIZE,
};

static const struct mem_reg openpic_cpu_mmio = {
	.write = openpic_cpu_write,
	.read = openpic_cpu_read,
	.start_addr = OPENPIC_CPU_REG_START,
	.size = OPENPIC_CPU_REG_SIZE,
};

static const struct mem_reg openpic_src_mmio = {
	.write = openpic_src_write,
	.read = openpic_src_read,
	.start_addr = OPENPIC_SRC_REG_START,
	.size = OPENPIC_SRC_REG_SIZE,
};

static const struct mem_reg openpic_msi_mmio = {
	.read = openpic_msi_read,
	.write = openpic_msi_write,
	.start_addr = OPENPIC_MSI_REG_START,
	.size = OPENPIC_MSI_REG_SIZE,
};

static const struct mem_reg openpic_summary_mmio = {
	.read = openpic_summary_read,
	.write = openpic_summary_write,
	.start_addr = OPENPIC_SUMMARY_REG_START,
	.size = OPENPIC_SUMMARY_REG_SIZE,
};
static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
{
	if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
		WARN(1, "kvm mpic: too many mmio regions\n");
		return;
	}

	opp->mmio_regions[opp->num_mmio_regions++] = mr;
}
static void fsl_common_init(struct openpic *opp)
{
	int i;
	int virq = MAX_SRC;

	add_mmio_region(opp, &openpic_msi_mmio);
	add_mmio_region(opp, &openpic_summary_mmio);

	opp->vid = VID_REVISION_1_2;
	opp->vir = VIR_GENERIC;
	opp->vector_mask = 0xFFFF;
	opp->tfrr_reset = 0;
	opp->ivpr_reset = IVPR_MASK_MASK;
	opp->idr_reset = 1 << 0;
	opp->max_irq = MAX_IRQ;

	opp->irq_ipi0 = virq;
	virq += MAX_IPI;
	opp->irq_tim0 = virq;
	virq += MAX_TMR;

	BUG_ON(virq > MAX_IRQ);

	opp->irq_msi = 224;

	for (i = 0; i < opp->fsl->max_ext; i++)
		opp->src[i].level = false;

	/* Internal interrupts, including message and MSI */
	for (i = 16; i < MAX_SRC; i++) {
		opp->src[i].type = IRQ_TYPE_FSLINT;
		opp->src[i].level = true;
	}

	/* timers and IPIs */
	for (i = MAX_SRC; i < virq; i++) {
		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
		opp->src[i].level = false;
	}
}
static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->read(opp, addr - mr->start_addr, ptr);
	}

	return -ENXIO;
}

static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->write(opp, addr - mr->start_addr, val);
	}

	return -ENXIO;
}
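/*
 * Dispatch sketch: both helpers walk the region table filled in by
 * add_mmio_region() and hand the access to the first region whose
 * [start_addr, start_addr + size) range contains the offset, always
 * passing an offset relative to the region start.  Accesses that hit
 * no region fail with -ENXIO.
 */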
static int kvm_mpic_read(struct kvm_vcpu *vcpu,
			 struct kvm_io_device *this,
			 gpa_t addr, int len, void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;
	union {
		u32 val;
		u8 bytes[4];
	} u;

	if (addr & (len - 1)) {
		pr_debug("%s: bad alignment %llx/%d\n",
			 __func__, addr, len);
		return -EINVAL;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
	spin_unlock_irq(&opp->lock);

	/*
	 * Technically only 32-bit accesses are allowed, but be nice to
	 * people dumping registers a byte at a time -- it works in real
	 * hardware (reads only, not writes).
	 */
	if (len == 4) {
		*(u32 *)ptr = u.val;
		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
			 __func__, addr, ret, u.val);
	} else if (len == 1) {
		*(u8 *)ptr = u.bytes[addr & 3];
		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
			 __func__, addr, ret, u.bytes[addr & 3]);
	} else {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EINVAL;
	}

	return ret;
}
*vcpu
,
1415 struct kvm_io_device
*this,
1416 gpa_t addr
, int len
, const void *ptr
)
1418 struct openpic
*opp
= container_of(this, struct openpic
, mmio
);
1422 pr_debug("%s: bad length %d\n", __func__
, len
);
1426 pr_debug("%s: bad alignment %llx/%d\n", __func__
, addr
, len
);
1430 spin_lock_irq(&opp
->lock
);
1431 ret
= kvm_mpic_write_internal(opp
, addr
- opp
->reg_base
,
1433 spin_unlock_irq(&opp
->lock
);
1435 pr_debug("%s: addr %llx ret %d val %x\n",
1436 __func__
, addr
, ret
, *(const u32
*)ptr
);
static const struct kvm_io_device_ops mpic_mmio_ops = {
	.read = kvm_mpic_read,
	.write = kvm_mpic_write,
};
static void map_mmio(struct openpic *opp)
{
	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
				opp->reg_base, OPENPIC_REG_SIZE,
				&opp->mmio);
}
static void unmap_mmio(struct openpic *opp)
{
	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
}
static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
{
	u64 base;

	if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
		return -EFAULT;

	if (base & 0x3ffff) {
		pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
			 __func__, base);
		return -EINVAL;
	}

	if (base == opp->reg_base)
		return 0;

	mutex_lock(&opp->kvm->slots_lock);

	unmap_mmio(opp);
	opp->reg_base = base;

	pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
		 __func__, base);

	if (base == 0)
		goto out;

	map_mmio(opp);

out:
	mutex_unlock(&opp->kvm->slots_lock);
	return 0;
}
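/*
 * Note on the alignment check above: 0x3ffff is OPENPIC_REG_SIZE - 1,
 * so the base address must be aligned to the full 256 KiB register
 * window.  A base of 0 is treated as "unmapped": the old registration
 * is removed and no new one is created.
 */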
#define ATTR_SET	0
#define ATTR_GET	1

static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
{
	int ret;

	if (addr & 3)
		return -ENXIO;

	spin_lock_irq(&opp->lock);

	if (type == ATTR_SET)
		ret = kvm_mpic_write_internal(opp, addr, *val);
	else
		ret = kvm_mpic_read_internal(opp, addr, val);

	spin_unlock_irq(&opp->lock);

	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);

	return ret;
}
static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u32 attr32;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return set_base_addr(opp, attr);
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return access_reg(opp, attr->attr, &attr32, ATTR_SET);

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		if (attr32 != 0 && attr32 != 1)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		openpic_set_irq(opp, attr->attr, attr32);
		spin_unlock_irq(&opp->lock);
		return 0;
	}

	return -ENXIO;
}
static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u64 attr64;
	u32 attr32;
	int ret;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			mutex_lock(&opp->kvm->slots_lock);
			attr64 = opp->reg_base;
			mutex_unlock(&opp->kvm->slots_lock);

			if (copy_to_user((u64 __user *)(long)attr->addr,
					 &attr64, sizeof(u64)))
				return -EFAULT;

			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
		if (ret)
			return ret;

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		attr32 = opp->src[attr->attr].pending;
		spin_unlock_irq(&opp->lock);

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;
	}

	return -ENXIO;
}
static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			break;

		return 0;
	}

	return -ENXIO;
}
static void mpic_destroy(struct kvm_device *dev)
{
	struct openpic *opp = dev->private;

	dev->kvm->arch.mpic = NULL;
	kfree(opp);
	kfree(dev);
}
static int mpic_set_default_irq_routing(struct openpic *opp)
{
	struct kvm_irq_routing_entry *routing;

	/* Create a nop default map, so that dereferencing it still works */
	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
	if (!routing)
		return -ENOMEM;

	kvm_set_irq_routing(opp->kvm, routing, 0, 0);

	kfree(routing);
	return 0;
}
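/*
 * Sketch of the userspace side (outside this file): after device
 * creation, a VMM would typically install a real GSI map with the
 * KVM_SET_GSI_ROUTING ioctl; the empty map installed here only ensures
 * that irq routing lookups are valid before that happens.
 */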
static int mpic_create(struct kvm_device *dev, u32 type)
{
	struct openpic *opp;
	int ret;

	/* We only support one MPIC at a time for now */
	if (dev->kvm->arch.mpic)
		return -EINVAL;

	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
	if (!opp)
		return -ENOMEM;

	dev->private = opp;
	opp->kvm = dev->kvm;
	opp->dev = dev;
	opp->model = type;
	spin_lock_init(&opp->lock);

	add_mmio_region(opp, &openpic_gbl_mmio);
	add_mmio_region(opp, &openpic_tmr_mmio);
	add_mmio_region(opp, &openpic_src_mmio);
	add_mmio_region(opp, &openpic_cpu_mmio);

	switch (opp->model) {
	case KVM_DEV_TYPE_FSL_MPIC_20:
		opp->fsl = &fsl_mpic_20;
		opp->brr1 = 0x00400200;
		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
		opp->nb_irqs = 80;
		opp->mpic_mode_mask = GCR_MODE_MIXED;

		fsl_common_init(opp);

		break;

	case KVM_DEV_TYPE_FSL_MPIC_42:
		opp->fsl = &fsl_mpic_42;
		opp->brr1 = 0x00400402;
		opp->flags |= OPENPIC_FLAG_ILR;
		opp->nb_irqs = 196;
		opp->mpic_mode_mask = GCR_MODE_PROXY;

		fsl_common_init(opp);

		break;

	default:
		ret = -ENODEV;
		goto err;
	}

	ret = mpic_set_default_irq_routing(opp);
	if (ret)
		goto err;

	openpic_reset(opp);

	dev->kvm->arch.mpic = opp;

	return 0;

err:
	kfree(opp);
	return ret;
}
struct kvm_device_ops kvm_mpic_ops = {
	.name = "kvm-mpic",
	.create = mpic_create,
	.destroy = mpic_destroy,
	.set_attr = mpic_set_attr,
	.get_attr = mpic_get_attr,
	.has_attr = mpic_has_attr,
};
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu)
{
	struct openpic *opp = dev->private;
	int ret = 0;

	if (dev->ops != &kvm_mpic_ops)
		return -EPERM;
	if (opp->kvm != vcpu->kvm)
		return -EPERM;
	if (cpu < 0 || cpu >= MAX_CPU)
		return -EPERM;

	spin_lock_irq(&opp->lock);

	if (opp->dst[cpu].vcpu) {
		ret = -EEXIST;
		goto out;
	}
	if (vcpu->arch.irq_type) {
		ret = -EBUSY;
		goto out;
	}

	opp->dst[cpu].vcpu = vcpu;
	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

	vcpu->arch.mpic = opp;
	vcpu->arch.irq_cpu_id = cpu;
	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

	/* This might need to be changed if GCR gets extended */
	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
	spin_unlock_irq(&opp->lock);
	return ret;
}
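/*
 * Usage sketch (userspace side, outside this file): a VMM typically
 * connects each vcpu to the in-kernel MPIC via KVM_ENABLE_CAP with
 * KVM_CAP_IRQ_MPIC, passing the MPIC device fd and this cpu number;
 * that ioctl path is what ends up calling the function above.
 */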
/*
 * This should only happen immediately before the mpic is destroyed,
 * so we shouldn't need to worry about anything still trying to
 * access the vcpu pointer.
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
}
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	u32 irq = e->irqchip.pin;
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_set_irq(opp, irq, level);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level,
		bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	/*
	 * XXX We ignore the target address for now, as we only support
	 *     a single MSI bank.
	 */
	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = mpic_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
			goto out;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}

	r = 0;
out:
	return r;
}