/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include "iodev.h"

#define MAX_CPU		32
#define MAX_SRC		256
#define MAX_TMR		4
#define MAX_IPI		4
#define MAX_MSI		8
#define MAX_IRQ		(MAX_SRC + MAX_IPI + MAX_TMR)
#define VID		0x03	/* MPIC version ID */
/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT	(1 << 0)
#define OPENPIC_FLAG_ILR	(2 << 0)
/* OpenPIC address map */
#define OPENPIC_REG_SIZE	0x40000
#define OPENPIC_GLB_REG_START	0x0
#define OPENPIC_GLB_REG_SIZE	0x10F0
#define OPENPIC_TMR_REG_START	0x10F0
#define OPENPIC_TMR_REG_SIZE	0x220
#define OPENPIC_MSI_REG_START	0x1600
#define OPENPIC_MSI_REG_SIZE	0x200
#define OPENPIC_SUMMARY_REG_START	0x3800
#define OPENPIC_SUMMARY_REG_SIZE	0x800
#define OPENPIC_SRC_REG_START	0x10000
#define OPENPIC_SRC_REG_SIZE	(MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START	0x20000
#define OPENPIC_CPU_REG_SIZE	(0x100 + ((MAX_CPU - 1) * 0x1000))
struct fsl_mpic_info {
	int max_ext;
};

static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};
#define FRR_NIRQ_SHIFT	16
#define FRR_NCPU_SHIFT	8
#define FRR_VID_SHIFT	0

#define VID_REVISION_1_2	2
#define VID_REVISION_1_3	3

#define VIR_GENERIC	0x00000000	/* Generic Vendor ID */

#define GCR_RESET	0x80000000
#define GCR_MODE_PASS	0x00000000
#define GCR_MODE_MIXED	0x20000000
#define GCR_MODE_PROXY	0x60000000

#define TBCR_CI		0x80000000	/* count inhibit */
#define TCCR_TOG	0x80000000	/* toggles when decremented to zero */

#define IDR_EP_SHIFT	31
#define IDR_EP_MASK	(1 << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT	30
#define IDR_CI1_SHIFT	29
#define IDR_P1_SHIFT	1
#define IDR_P0_SHIFT	0

#define ILR_INTTGT_MASK	0x000000ff
#define ILR_INTTGT_INT	0x00
#define ILR_INTTGT_CINT	0x01	/* critical */
#define ILR_INTTGT_MCP	0x02	/* machine check */
#define NUM_OUTPUTS	3

#define MSIIR_OFFSET	0x140
#define MSIIR_SRS_SHIFT	29
#define MSIIR_SRS_MASK	(0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT	24
#define MSIIR_IBS_MASK	(0x1f << MSIIR_IBS_SHIFT)
static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx);
enum irq_type {
	IRQ_TYPE_NORMAL = 0,
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};
struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;
	int priority;
};
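/*
 * Example of the rounding above: with MAX_IRQ = 264, (264 + 63) & ~63
 * gives 320 bits, i.e. ten 32-bit or five 64-bit longs -- 40 bytes
 * either way, so the bitmap's byte layout matches across word sizes.
 */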
struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;		/* critical interrupts ignore mask on some FSL MPICs */
};
#define IVPR_MASK_SHIFT		31
#define IVPR_MASK_MASK		(1 << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT	30
#define IVPR_ACTIVITY_MASK	(1 << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT		29
#define IVPR_MODE_MASK		(1 << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT	23
#define IVPR_POLARITY_MASK	(1 << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT	22
#define IVPR_SENSE_MASK		(1 << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK	(0xF << 16)
#define IVPR_PRIORITY(_ivprr_)	((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_)	((_ivprr_) & (opp)->vector_mask)

/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP		0x80000000	/* external pin */
#define IDR_CI		0x40000000	/* critical interrupt */
struct irq_dest {
	struct kvm_vcpu *vcpu;

	int32_t ctpr;		/* CPU current task priority */
	struct irq_queue raised;
	struct irq_queue servicing;

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};

#define MAX_MMIO_REGIONS	10
struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	const struct mem_reg *mmio_regions[MAX_MMIO_REGIONS];
	int num_mmio_regions;

	gpa_t reg_base;
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;
	uint32_t flags;
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */
	/* Source registers */
	struct irq_source src[MAX_IRQ];
	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;
	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];
	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;
	uint32_t irq_ipi0;
	uint32_t irq_tim0;
	uint32_t irq_msi;
};
static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}
static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}
static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}

static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
{
	return test_bit(n_IRQ, q->queue);
}
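/*
 * Recompute a queue's cached "next interrupt": scan the whole pending
 * bitmap and remember the highest-priority set bit.  Ties are won by
 * the lowest-numbered IRQ, since only a strictly greater priority
 * replaces the current candidate.
 */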
static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
			irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;
	q->priority = priority;
}
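/*
 * Return the queue's highest-priority pending IRQ, or -1 if the queue
 * is empty.  The cached next/priority values are rebuilt on each call.
 */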
static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}
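/*
 * Propagate a single source's state change to a single destination CPU.
 * Non-INT outputs (critical, machine check) bypass the priority logic
 * entirely and are simply reference-counted per output pin; the INT
 * output goes through the raised queue, ctpr and servicing checks.
 */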
static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		__func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			__func__, src->output, n_IRQ, active, was_active,
			dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			__func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				__func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				__func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->raised.next,
				dst->raised.priority, dst->ctpr,
				dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->ctpr,
				dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
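/*
 * Entry point for line state changes.  Level-sensitive sources track
 * the line level directly; edge-sensitive sources latch a pulse on the
 * rising edge, and for non-INT outputs the pulse is cleared again
 * immediately, since those outputs have no IACK/EOI cycle to end it.
 */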
static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
	struct openpic *opp = opaque;
	struct irq_source *src;

	if (n_IRQ >= MAX_IRQ) {
		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
		return;
	}

	src = &opp->src[n_IRQ];
	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
		n_IRQ, level, src->ivpr);
	if (src->level) {
		/* level-sensitive irq */
		src->pending = level;
		openpic_update_irq(opp, n_IRQ);
	} else {
		/* edge-sensitive irq */
		if (level) {
			src->pending = 1;
			openpic_update_irq(opp, n_IRQ);

			if (src->output != ILR_INTTGT_INT) {
				/* Edge-triggered interrupts shouldn't be used
				 * with non-INT delivery, but just in case,
				 * try to make it do something sane rather than
				 * cause an interrupt storm.  This is close to
				 * what you'd probably see happen in real
				 * hardware.
				 */
				src->pending = 0;
				openpic_update_irq(opp, n_IRQ);
			}
		}
	}
}
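/*
 * Reset to power-on defaults: ctpr 15 on every destination masks all
 * interrupt priorities, and each timer starts count-inhibited (TBCR_CI).
 */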
static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;
		opp->src[i].idr = opp->idr_reset;

		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}
static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}
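/*
 * IDR layout on FSL MPICs with OPENPIC_FLAG_IDR_CRIT: the low nb_cpus
 * bits select normal (INT) destinations, EP sits in bit 31, and the
 * per-CPU critical-interrupt bits occupy the window just below EP,
 * hence crit_shift = IDR_EP_SHIFT - nb_cpus.
 */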
static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					__func__);
			}

			src->output = ILR_INTTGT_CINT;
			src->nomask = true;
			src->destmask = 0;

			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}
static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
			src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}
static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, the sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		opp->src[n_IRQ].ivpr);
}
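/*
 * A GCR write either triggers a full controller reset (GCR_RESET) or
 * updates the pass-through/mixed/proxy mode bits; everything else in
 * GCR is treated as read-only here.
 */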
static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
		return;
	}

	opp->gcr &= ~opp->mpic_mode_mask;
	opp->gcr |= val & opp->mpic_mode_mask;
}
static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int err = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x00:	/* Block Revision Register1 (BRR1) is Readonly */
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;
	case 0x1000:		/* FRR */
		break;
	case 0x1020:		/* GCR */
		openpic_gcr_write(opp, val);
		break;
	case 0x1080:		/* VIR */
		break;
	case 0x1090:		/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		err = -ENXIO;
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;
		idx = (addr - 0x10A0) >> 4;
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
		break;
	}
	case 0x10E0:		/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;
	default:
		break;
	}

	return err;
}
static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	u32 retval;
	int err = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;
	if (addr & 0xF)
		goto out;

	switch (addr) {
	case 0x1000:		/* FRR */
		retval = opp->frr;
		retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
		break;
	case 0x1020:		/* GCR */
		retval = opp->gcr;
		break;
	case 0x1080:		/* VIR */
		retval = opp->vir;
		break;
	case 0x1090:		/* PIR */
		retval = 0x00000000;
		break;
	case 0x00:		/* Block Revision Register1 (BRR1) */
		retval = opp->brr1;
		break;
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_read_internal(opp, addr,
						&retval, get_current_cpu());
		break;
	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;
		idx = (addr - 0x10A0) >> 4;
		retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
		break;
	}
	case 0x10E0:		/* SPVE */
		retval = opp->spve;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);

	*ptr = retval;
	return err;
}
static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	addr += 0x10f0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == 0x10f0) {
		/* TFRR */
		opp->tfrr = val;
		return 0;
	}

	idx = (addr >> 6) & 0x3;

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		break;
	case 0x10:		/* TBCR */
		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
		    (val & TBCR_CI) == 0 &&
		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
			opp->timers[idx].tccr &= ~TCCR_TOG;

		opp->timers[idx].tbcr = val;
		break;
	case 0x20:		/* TVPR */
		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
		break;
	case 0x30:		/* TDR */
		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
		break;
	}

	return 0;
}
static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval = -1;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto out;

	idx = (addr >> 6) & 0x3;
	if (addr == 0x0) {
		/* TFRR */
		retval = opp->tfrr;
		goto out;
	}

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		retval = opp->timers[idx].tccr;
		break;
	case 0x10:		/* TBCR */
		retval = opp->timers[idx].tbcr;
		break;
	case 0x20:		/* TIPV */
		retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
		break;
	case 0x30:		/* TIDE (TIDR) */
		retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;

	return 0;
}
static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		write_IRQreg_ivpr(opp, idx, val);
		break;
	case 0x10:
		write_IRQreg_idr(opp, idx, val);
		break;
	case 0x18:
		write_IRQreg_ilr(opp, idx, val);
		break;
	}

	return 0;
}
static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		retval = read_IRQreg_ivpr(opp, idx);
		break;
	case 0x10:
		retval = read_IRQreg_idr(opp, idx);
		break;
	case 0x18:
		retval = read_IRQreg_ilr(opp, idx);
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;

	return 0;
}
static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx = opp->irq_msi;
	int srs, ibs;

	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case MSIIR_OFFSET:
		srs = val >> MSIIR_SRS_SHIFT;
		idx += srs;
		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
		opp->msi[srs].msir |= 1 << ibs;
		openpic_set_irq(opp, idx, 1);
		break;
	default:
		/* most registers are read-only, thus ignored */
		break;
	}

	return 0;
}
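/*
 * The MSIR banks are clear-on-read: a read returns the accumulated
 * bits, clears the bank, and lowers the corresponding cascade IRQ.
 * MSISR (0x120) is a summary with one bit per bank that still has
 * interrupts pending.
 */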
static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0;
	int i, srs;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		return -ENXIO;

	srs = addr >> 4;

	switch (addr) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:		/* MSIRs */
		r = opp->msi[srs].msir;
		/* Clear on read */
		opp->msi[srs].msir = 0;
		openpic_set_irq(opp, opp->irq_msi + srs, 0);
		break;
	case 0x120:		/* MSISR */
		for (i = 0; i < MAX_MSI; i++)
			r |= (opp->msi[i].msir ? 1 : 0) << i;
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;

	return 0;
}
static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
{
	uint32_t r = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);

	/* TODO: EISR/EIMR */
	*ptr = r;
	return 0;
}

static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
{
	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);

	/* TODO: EISR/EIMR */
	return 0;
}
static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx)
{
	struct openpic *opp = opaque;
	struct irq_source *src;
	struct irq_dest *dst;
	int s_IRQ, n_IRQ;

	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
		addr, val);

	if (idx < 0)
		return 0;

	if (addr & 0xF)
		return 0;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x40:		/* IPIDR */
	case 0x50:
	case 0x60:
	case 0x70:
		idx = (addr - 0x40) >> 4;
		/* we use IDE as mask which CPUs to deliver the IPI to still. */
		opp->src[opp->irq_ipi0 + idx].destmask |= val;
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
		break;
	case 0x80:		/* CTPR */
		dst->ctpr = val & 0x0000000F;

		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
			__func__, idx, dst->ctpr, dst->raised.priority,
			dst->servicing.priority);

		if (dst->raised.priority <= dst->ctpr) {
			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
				__func__, idx);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		} else if (dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
				__func__, idx, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		break;
	case 0x90:		/* WHOAMI */
		/* Read-only register */
		break;
	case 0xA0:		/* IACK */
		/* Read-only register */
		break;
	case 0xB0: {		/* EOI */
		int notify_eoi;

		pr_debug("EOI\n");
		s_IRQ = IRQ_get_next(opp, &dst->servicing);

		if (s_IRQ < 0) {
			pr_debug("%s: EOI with no interrupt in service\n",
				__func__);
			break;
		}

		IRQ_resetbit(&dst->servicing, s_IRQ);
		/* Notify listeners that the IRQ is over */
		notify_eoi = s_IRQ;
		/* Set up next servicing IRQ */
		s_IRQ = IRQ_get_next(opp, &dst->servicing);
		/* Check queued interrupts. */
		n_IRQ = IRQ_get_next(opp, &dst->raised);
		src = &opp->src[n_IRQ];
		if (n_IRQ != -1 &&
		    (s_IRQ == -1 ||
		     IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
			pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
				idx, n_IRQ);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		spin_unlock(&opp->lock);
		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
		spin_lock(&opp->lock);

		break;
	}
	default:
		break;
	}

	return 0;
}
static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;

	return openpic_cpu_write_internal(opp, addr, val,
					 (addr & 0x1f000) >> 12);
}
static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
			     int cpu)
{
	struct irq_source *src;
	int retval, irq;

	pr_debug("Lower OpenPIC INT output\n");
	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);

	irq = IRQ_get_next(opp, &dst->raised);
	pr_debug("IACK: irq=%d\n", irq);

	if (irq == -1)
		/* No more interrupt pending */
		return opp->spve;

	src = &opp->src[irq];
	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
			__func__, irq, dst->ctpr, src->ivpr);
		openpic_update_irq(opp, irq);
		retval = opp->spve;
	} else {
		/* IRQ enter servicing state */
		IRQ_setbit(&dst->servicing, irq);
		retval = IVPR_VECTOR(opp, src->ivpr);
	}

	if (!src->level) {
		/* edge-sensitive IRQ */
		src->ivpr &= ~IVPR_ACTIVITY_MASK;
		src->pending = 0;
		IRQ_resetbit(&dst->raised, irq);
	}

	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
		src->destmask &= ~(1 << cpu);
		if (src->destmask && !src->level) {
			/* trigger on CPUs that didn't know about it yet */
			openpic_set_irq(opp, irq, 1);
			openpic_set_irq(opp, irq, 0);
			/* if all CPUs knew about it, set active bit again */
			src->ivpr |= IVPR_ACTIVITY_MASK;
		}
	}

	return retval;
}
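/*
 * In proxy mode (GCR_MODE_PROXY) the guest reads its vector from the
 * EPR rather than doing an MMIO IACK, so the IACK side effects above
 * are performed here at interrupt delivery time.
 */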
void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
	struct openpic *opp = vcpu->arch.mpic;
	int cpu = vcpu->arch.irq_cpu_id;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));

	spin_unlock_irqrestore(&opp->lock, flags);
}
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx)
{
	struct openpic *opp = opaque;
	struct irq_dest *dst;
	uint32_t retval;

	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
	retval = 0xFFFFFFFF;

	if (idx < 0)
		goto out;

	if (addr & 0xF)
		goto out;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x80:		/* CTPR */
		retval = dst->ctpr;
		break;
	case 0x90:		/* WHOAMI */
		retval = idx;
		break;
	case 0xA0:		/* IACK */
		retval = openpic_iack(opp, dst, idx);
		break;
	case 0xB0:		/* EOI */
		retval = 0;
		break;
	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;

	return 0;
}
static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;

	return openpic_cpu_read_internal(opp, addr, ptr,
					(addr & 0x1f000) >> 12);
}
struct mem_reg {
	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
	int (*write)(void *opaque, gpa_t addr, u32 val);
	gpa_t start_addr;
	int size;
};
static const struct mem_reg openpic_gbl_mmio = {
	.write = openpic_gbl_write,
	.read = openpic_gbl_read,
	.start_addr = OPENPIC_GLB_REG_START,
	.size = OPENPIC_GLB_REG_SIZE,
};

static const struct mem_reg openpic_tmr_mmio = {
	.write = openpic_tmr_write,
	.read = openpic_tmr_read,
	.start_addr = OPENPIC_TMR_REG_START,
	.size = OPENPIC_TMR_REG_SIZE,
};

static const struct mem_reg openpic_cpu_mmio = {
	.write = openpic_cpu_write,
	.read = openpic_cpu_read,
	.start_addr = OPENPIC_CPU_REG_START,
	.size = OPENPIC_CPU_REG_SIZE,
};

static const struct mem_reg openpic_src_mmio = {
	.write = openpic_src_write,
	.read = openpic_src_read,
	.start_addr = OPENPIC_SRC_REG_START,
	.size = OPENPIC_SRC_REG_SIZE,
};

static const struct mem_reg openpic_msi_mmio = {
	.read = openpic_msi_read,
	.write = openpic_msi_write,
	.start_addr = OPENPIC_MSI_REG_START,
	.size = OPENPIC_MSI_REG_SIZE,
};

static const struct mem_reg openpic_summary_mmio = {
	.read = openpic_summary_read,
	.write = openpic_summary_write,
	.start_addr = OPENPIC_SUMMARY_REG_START,
	.size = OPENPIC_SUMMARY_REG_SIZE,
};
static void add_mmio_region(struct openpic *opp, const struct mem_reg *mr)
{
	if (opp->num_mmio_regions >= MAX_MMIO_REGIONS) {
		WARN(1, "kvm mpic: too many mmio regions\n");
		return;
	}

	opp->mmio_regions[opp->num_mmio_regions++] = mr;
}
static void fsl_common_init(struct openpic *opp)
{
	int i;
	int virq = MAX_SRC;

	add_mmio_region(opp, &openpic_msi_mmio);
	add_mmio_region(opp, &openpic_summary_mmio);

	opp->vid = VID_REVISION_1_2;
	opp->vir = VIR_GENERIC;
	opp->vector_mask = 0xFFFF;
	opp->tfrr_reset = 0;
	opp->ivpr_reset = IVPR_MASK_MASK;
	opp->idr_reset = 1 << 0;
	opp->max_irq = MAX_IRQ;

	opp->irq_ipi0 = virq;
	virq += MAX_IPI;
	opp->irq_tim0 = virq;
	virq += MAX_TMR;

	BUG_ON(virq > MAX_IRQ);

	opp->irq_msi = 224;

	for (i = 0; i < opp->fsl->max_ext; i++)
		opp->src[i].level = false;

	/* Internal interrupts, including message and MSI */
	for (i = 16; i < MAX_SRC; i++) {
		opp->src[i].type = IRQ_TYPE_FSLINT;
		opp->src[i].level = true;
	}

	/* timers and IPIs */
	for (i = MAX_SRC; i < virq; i++) {
		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
		opp->src[i].level = false;
	}
}
static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->read(opp, addr - mr->start_addr, ptr);
	}

	return -ENXIO;
}
static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
{
	int i;

	for (i = 0; i < opp->num_mmio_regions; i++) {
		const struct mem_reg *mr = opp->mmio_regions[i];

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->write(opp, addr - mr->start_addr, val);
	}

	return -ENXIO;
}
static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
			 int len, void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;
	union {
		u32 val;
		u8 bytes[4];
	} u;

	if (addr & (len - 1)) {
		pr_debug("%s: bad alignment %llx/%d\n",
			 __func__, addr, len);
		return -EINVAL;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
	spin_unlock_irq(&opp->lock);

	/*
	 * Technically only 32-bit accesses are allowed, but be nice to
	 * people dumping registers a byte at a time -- it works in real
	 * hardware (reads only, not writes).
	 */
	if (len == 4) {
		*(u32 *)ptr = u.val;
		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
			 __func__, addr, ret, u.val);
	} else if (len == 1) {
		*(u8 *)ptr = u.bytes[addr & 3];
		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
			 __func__, addr, ret, u.bytes[addr & 3]);
	} else {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EINVAL;
	}

	return ret;
}
static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
			  int len, const void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;

	if (len != 4) {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EOPNOTSUPP;
	}
	if (addr & 3) {
		pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
		return -EOPNOTSUPP;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
				      *(const u32 *)ptr);
	spin_unlock_irq(&opp->lock);

	pr_debug("%s: addr %llx ret %d val %x\n",
		 __func__, addr, ret, *(const u32 *)ptr);

	return ret;
}
static const struct kvm_io_device_ops mpic_mmio_ops = {
	.read = kvm_mpic_read,
	.write = kvm_mpic_write,
};
static void map_mmio(struct openpic *opp)
{
	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
				opp->reg_base, OPENPIC_REG_SIZE,
				&opp->mmio);
}

static void unmap_mmio(struct openpic *opp)
{
	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
}
*opp
, struct kvm_device_attr
*attr
)
1464 if (copy_from_user(&base
, (u64 __user
*)(long)attr
->addr
, sizeof(u64
)))
1467 if (base
& 0x3ffff) {
1468 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
1473 if (base
== opp
->reg_base
)
1476 mutex_lock(&opp
->kvm
->slots_lock
);
1479 opp
->reg_base
= base
;
1481 pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
1490 mutex_unlock(&opp
->kvm
->slots_lock
);
#define ATTR_SET	0
#define ATTR_GET	1

static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
{
	int ret;

	if (addr & 3)
		return -ENXIO;

	spin_lock_irq(&opp->lock);

	if (type == ATTR_SET)
		ret = kvm_mpic_write_internal(opp, addr, *val);
	else
		ret = kvm_mpic_read_internal(opp, addr, val);

	spin_unlock_irq(&opp->lock);

	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);

	return ret;
}
static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u32 attr32;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return set_base_addr(opp, attr);
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return access_reg(opp, attr->attr, &attr32, ATTR_SET);

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		if (attr32 != 0 && attr32 != 1)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		openpic_set_irq(opp, attr->attr, attr32);
		spin_unlock_irq(&opp->lock);
		return 0;
	}

	return -ENXIO;
}
static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u64 attr64;
	u32 attr32;
	int ret;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			mutex_lock(&opp->kvm->slots_lock);
			attr64 = opp->reg_base;
			mutex_unlock(&opp->kvm->slots_lock);

			if (copy_to_user((u64 __user *)(long)attr->addr,
					 &attr64, sizeof(u64)))
				return -EFAULT;

			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
		if (ret)
			return ret;

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		attr32 = opp->src[attr->attr].pending;
		spin_unlock_irq(&opp->lock);

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;
	}

	return -ENXIO;
}
static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		return 0;
	}

	return -ENXIO;
}
static void mpic_destroy(struct kvm_device *dev)
{
	struct openpic *opp = dev->private;

	dev->kvm->arch.mpic = NULL;
	kfree(opp);
}
static int mpic_set_default_irq_routing(struct openpic *opp)
{
	struct kvm_irq_routing_entry *routing;

	/* Create a nop default map, so that dereferencing it still works */
	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
	if (!routing)
		return -ENOMEM;

	kvm_set_irq_routing(opp->kvm, routing, 0, 0);

	kfree(routing);
	return 0;
}
static int mpic_create(struct kvm_device *dev, u32 type)
{
	struct openpic *opp;
	int ret;

	/* We only support one MPIC at a time for now */
	if (dev->kvm->arch.mpic)
		return -EINVAL;

	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
	if (!opp)
		return -ENOMEM;

	dev->private = opp;
	opp->kvm = dev->kvm;
	opp->dev = dev;
	opp->model = type;
	spin_lock_init(&opp->lock);

	add_mmio_region(opp, &openpic_gbl_mmio);
	add_mmio_region(opp, &openpic_tmr_mmio);
	add_mmio_region(opp, &openpic_src_mmio);
	add_mmio_region(opp, &openpic_cpu_mmio);

	switch (opp->model) {
	case KVM_DEV_TYPE_FSL_MPIC_20:
		opp->fsl = &fsl_mpic_20;
		opp->brr1 = 0x00400200;
		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
		opp->nb_irqs = 80;
		opp->mpic_mode_mask = GCR_MODE_MIXED;

		fsl_common_init(opp);

		break;

	case KVM_DEV_TYPE_FSL_MPIC_42:
		opp->fsl = &fsl_mpic_42;
		opp->brr1 = 0x00400402;
		opp->flags |= OPENPIC_FLAG_ILR;
		opp->nb_irqs = 196;
		opp->mpic_mode_mask = GCR_MODE_PROXY;

		fsl_common_init(opp);

		break;

	default:
		ret = -ENODEV;
		goto err;
	}

	ret = mpic_set_default_irq_routing(opp);
	if (ret)
		goto err;

	openpic_reset(opp);

	smp_wmb();
	dev->kvm->arch.mpic = opp;

	return 0;

err:
	kfree(opp);
	return ret;
}
struct kvm_device_ops kvm_mpic_ops = {
	.name = "kvm-mpic",
	.create = mpic_create,
	.destroy = mpic_destroy,
	.set_attr = mpic_set_attr,
	.get_attr = mpic_get_attr,
	.has_attr = mpic_has_attr,
};
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu)
{
	struct openpic *opp = dev->private;
	int ret = 0;

	if (dev->ops != &kvm_mpic_ops)
		return -EPERM;
	if (opp->kvm != vcpu->kvm)
		return -EPERM;
	if (cpu < 0 || cpu >= MAX_CPU)
		return -EPERM;

	spin_lock_irq(&opp->lock);

	if (opp->dst[cpu].vcpu) {
		ret = -EEXIST;
		goto out;
	}
	if (vcpu->arch.irq_type) {
		ret = -EBUSY;
		goto out;
	}

	opp->dst[cpu].vcpu = vcpu;
	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

	vcpu->arch.mpic = opp;
	vcpu->arch.irq_cpu_id = cpu;
	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

	/* This might need to be changed if GCR gets extended */
	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
	spin_unlock_irq(&opp->lock);
	return ret;
}
/*
 * This should only happen immediately before the mpic is destroyed,
 * so we shouldn't need to worry about anything still trying to
 * access the vcpu pointer.
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
}
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	u32 irq = e->irqchip.pin;
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_set_irq(opp, irq, level);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	/*
	 * XXX We ignore the target address for now, as we only support
	 *     a single MSI bank.
	 */
	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = mpic_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
			goto out;
		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}

	r = 0;
out:
	return r;
}