#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8  irte_mask;
};

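/*
 * Per-irq bookkeeping: which IOMMU remaps the interrupt and where its
 * IRTE block lives in that IOMMU's table.  With CONFIG_SPARSE_IRQ the
 * state hangs off each irq_desc and is allocated on demand; otherwise a
 * static NR_IRQS-sized array is used.
 */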
#ifdef CONFIG_SPARSE_IRQ
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
	struct irq_2_iommu *iommu;
	int node;

	node = cpu_to_node(cpu);

	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
	printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

	return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if (WARN_ON_ONCE(!desc))
		return NULL;

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	struct irq_2_iommu *irq_iommu;

	/*
	 * alloc irq desc if not allocated already.
	 */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return NULL;
	}

	irq_iommu = desc->irq_2_iommu;

	if (!irq_iommu)
		desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

	return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	if (irq < nr_irqs)
		return &irq_2_iommuX[irq];

	return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

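/*
 * Return the irq_2_iommu entry for @irq only if an IRTE has actually
 * been bound to it; NULL otherwise.
 */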
static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

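/*
 * Copy the IRTE backing @irq into @entry.  Returns 0 on success, -1 if
 * the irq is not remapped.
 */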
int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	if (!entry)
		return -1;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

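/*
 * Allocate @count contiguous IRTEs in @iommu's remapping table and bind
 * @irq to the block.  @count is rounded up to a power of two so the
 * block can be described as base index + mask, matching the hardware's
 * invalidation-handle scheme.  Returns the base index, or -1 on failure.
 * Rough caller flow (a sketch of how the x86 interrupt setup code uses
 * this API): index = alloc_irte(iommu, irq, count), then modify_irte()
 * to fill in each entry, and free_irte() on teardown.
 */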
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count)
		return -1;

#ifndef CONFIG_SPARSE_IRQ
	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;
#endif

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

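/*
 * Queue a selective interrupt-entry-cache invalidation for the IRTE
 * block at @index (size 2^@mask) and wait for the hardware to finish.
 */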
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

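/*
 * Report where @irq is mapped: returns the IRTE base index and stores
 * the sub-handle in *@sub_handle, or returns -1 if not remapped.
 */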
int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

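/*
 * Bind @irq to an IRTE block that was already allocated (by alloc_irte()
 * for another irq): record the IOMMU, base @index and @subhandle without
 * touching the table itself.  Sub-handles are how multiple interrupts
 * (e.g. a multi-vector MSI group) share one contiguous IRTE allocation.
 */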
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu = irq_2_iommu_alloc(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		printk(KERN_ERR "can't allocate irq_2_iommu\n");
		return -1;
	}

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

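/*
 * Overwrite the IRTE backing @irq with @irte_modified, flush the entry
 * to memory, and invalidate the hardware's cached copy.
 */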
int modify_irte(int irq, struct irte *irte_modified)
{
	int rc;
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

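/*
 * Map an IO-APIC id to the remapping hardware unit covering it, as
 * recorded by ir_parse_ioapic_scope() at boot.
 */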
struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;

	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

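/*
 * Release the IRTE block backing @irq.  Only the owner of sub-handle 0
 * clears the entries and invalidates the cache; in all cases the irq's
 * binding to the IOMMU is dropped.
 */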
int free_irte(int irq)
{
	int rc = 0;
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		/* clear every entry in the block, not just the first one */
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)(irte + i), 0);
		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

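/*
 * Program and turn on interrupt remapping for one IOMMU: write the table
 * address and size into DMAR_IRTA_REG, issue SIRTP ("set interrupt remap
 * table pointer") and wait for IRTPS, globally invalidate the interrupt
 * entry cache, then set IRE and wait for IRES to confirm remapping is
 * live.
 */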
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);
	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void disable_intr_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

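/*
 * System-wide enable, in several passes over the DRHD units: clear stale
 * faults and disable any remapping/queued-invalidation state left over
 * from the firmware handover, check IR (and, if @eim, EIM) support,
 * enable queued invalidation everywhere, and finally set up a remapping
 * table on each unit.
 */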
int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		disable_intr_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
			       " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued"
			       " invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

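/*
 * Walk one DRHD's device scope entries and record every IO-APIC found
 * under it in ir_ioapic[].
 */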
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IO-APICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}