// SPDX-License-Identifier: GPL-2.0
/*
 * MSI hooks for standard x86 apic
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/msidef.h>

static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 addr, data;
	int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
	unsigned int irq = idata->irq;

	if (irq_prepare_move(irq, cpu))
		return -1;

	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);

	/* Retarget the cached message at the new CPU and the irq's vector. */
	addr = msg.address_lo;
	addr &= MSI_ADDR_DEST_ID_MASK;
	addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
	msg.address_lo = addr;

	data = msg.data;
	data &= MSI_DATA_VECTOR_MASK;
	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = data;

	pci_write_msi_msg(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */

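/*
 * Allocate an irq for the device, compose an MSI message that targets an
 * online CPU in the irq's domain, write it to the device and install the
 * edge-triggered MSI irq_chip.
 */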
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct msi_msg msg;
	unsigned long dest_phys_id;
	int irq, vector;

	irq = create_irq();
	if (irq < 0)
		return irq;

	irq_set_msi_desc(irq, desc);
	dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)),
						       cpu_online_mask));
	vector = irq_to_vector(irq);

	msg.address_hi = 0;
	msg.address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest_phys_id);

	msg.data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(vector);

	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

	return 0;
}

void ia64_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}

static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	unsigned int vector = irq_to_vector(data->irq);
	ia64_resend_irq(vector);

	return 1;
}

/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
	.name			= "PCI-MSI",
	.irq_mask		= pci_msi_mask_irq,
	.irq_unmask		= pci_msi_unmask_irq,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= ia64_set_msi_irq_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};

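/*
 * Machine-vector hooks, when the platform provides them, take precedence
 * over the generic IA64 implementation.
 */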
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	if (platform_setup_msi_irq)
		return platform_setup_msi_irq(pdev, desc);

	return ia64_setup_msi_irq(pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	if (platform_teardown_msi_irq)
		return platform_teardown_msi_irq(irq);

	return ia64_teardown_msi_irq(irq);
}

#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
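/* Retarget the DMAR (Intel IOMMU) MSI at a new CPU. */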
static int dmar_msi_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	unsigned int irq = data->irq;
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irq_prepare_move(irq, cpu))
		return -1;

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_data_get_affinity_mask(data), mask);

	return 0;
}
#endif /* CONFIG_SMP */

static struct irq_chip dmar_msi_type = {
	.name			= "DMAR_MSI",
	.irq_unmask		= dmar_msi_unmask,
	.irq_mask		= dmar_msi_mask,
	.irq_ack		= ia64_ack_msi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= dmar_msi_set_affinity,
#endif
	.irq_retrigger		= ia64_msi_retrigger_irq,
};

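/*
 * Compose an MSI message for @irq: physical destination mode aimed at an
 * online CPU in the irq's domain, edge triggered, fixed delivery, carrying
 * the vector from irq_cfg. @pdev is unused here.
 */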
static void
msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned dest;

	dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)),
						 cpu_online_mask));

	msg->address_hi = 0;
	msg->address_lo =
		MSI_ADDR_HEADER |
		MSI_ADDR_DEST_MODE_PHYS |
		MSI_ADDR_REDIRECTION_CPU |
		MSI_ADDR_DEST_ID_CPU(dest);

	msg->data =
		MSI_DATA_TRIGGER_EDGE |
		MSI_DATA_LEVEL_ASSERT |
		MSI_DATA_DELIVERY_FIXED |
		MSI_DATA_VECTOR(cfg->vector);
}

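/*
 * Allocate an irq for a DMAR unit, attach the dmar_msi_type chip and
 * program the unit's MSI address/data registers.
 */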
int dmar_alloc_hwirq(int id, int node, void *arg)
{
	int irq;
	struct msi_msg msg;

	irq = create_irq();
	if (irq > 0) {
		irq_set_handler_data(irq, arg);
		irq_set_chip_and_handler_name(irq, &dmar_msi_type,
					      handle_edge_irq, "edge");
		msi_compose_msg(NULL, irq, &msg);
		dmar_msi_write(irq, &msg);
	}

	return irq;
}

void dmar_free_hwirq(int irq)
{
	irq_set_handler_data(irq, NULL);
	destroy_irq(irq);
}
#endif /* CONFIG_INTEL_IOMMU */