// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Author: Weidong Han <weidong.han@intel.com>
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
#include <xen/pci.h>
#include <xen/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/xen.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>

static int xen_mcfg_late(void);
#endif

static bool __read_mostly pci_seg_supported = true;
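
/*
 * Notify the hypervisor about a PCI device that has been added to dom0.
 * Hypervisors that understand PHYSDEVOP_pci_device_add get the PCI segment
 * (domain) and, where available, the SR-IOV physfn and NUMA proximity
 * (_PXM) information; if that call returns -ENOSYS the code falls back to
 * the legacy PHYSDEVOP_manage_pci_add{,_ext} calls, which can only describe
 * devices on segment 0.
 */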
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif
#ifdef CONFIG_PCI_MMCONFIG
	static bool pci_mcfg_reserved = false;
	/*
	 * Reserve MCFG areas in Xen on first invocation due to this being
	 * potentially called from inside of acpi_init immediately after
	 * MCFG table has been finally parsed.
	 */
	if (!pci_mcfg_reserved) {
		xen_mcfg_late();
		pci_mcfg_reserved = true;
	}
#endif

	if (pci_seg_supported) {
		DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

		add->seg = pci_domain_nr(pci_dev->bus);
		add->bus = pci_dev->bus->number;
		add->devfn = pci_dev->devfn;

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			add->flags = XEN_PCI_DEV_VIRTFN;
			add->physfn.bus = physfn->bus->number;
			add->physfn.devfn = physfn->devfn;
		} else
#endif
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add->flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		handle = ACPI_HANDLE(&pci_dev->dev);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (!handle) {
			/*
			 * This device was not listed in the ACPI name space at
			 * all. Try to get acpi handle of parent pci bus.
			 */
			struct pci_bus *pbus;

			for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) {
				handle = acpi_pci_get_bridge_handle(pbus);
				if (handle)
					break;
			}
		}
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add->optarr[0] = pxm;
					add->flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
		if (r != -ENOSYS)
			return r;
		pci_seg_supported = false;
	}

	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add,
					  &manage_pci);
	}

	return r;
}
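
/*
 * Mirror of xen_add_device(): tell the hypervisor that a PCI device has been
 * removed, preferring the segment-aware PHYSDEVOP_pci_device_remove and
 * falling back to PHYSDEVOP_manage_pci_remove for segment 0 only.
 */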
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}
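
/*
 * Ask the hypervisor to perform a function-level reset (FLR) of the given
 * device via PHYSDEVOP_pci_device_reset.
 */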
int xen_reset_device(const struct pci_dev *dev)
{
	struct pci_device_reset device = {
		.dev.seg = pci_domain_nr(dev->bus),
		.dev.bus = dev->bus->number,
		.dev.devfn = dev->devfn,
		.flags = PCI_DEVICE_RESET_FLR,
	};

	return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
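
/*
 * Bus notifier: forward PCI device add/remove events to the hypervisor.
 * Failures are only reported via dev_err(); as the message says, passthrough
 * or MSI/MSI-X setup for that device may then fail.
 */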
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev, "Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));

	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};
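
/*
 * Only the initial domain (dom0) owns the physical PCI bus, so the notifier
 * is registered there and this init call is a no-op everywhere else.
 */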
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);
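
/*
 * Report the MMCONFIG (MCFG) regions parsed from ACPI to the hypervisor so
 * that it can treat them as reserved; called from xen_add_device() once the
 * MCFG table has been parsed.
 */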
#ifdef CONFIG_PCI_MMCONFIG
static int xen_mcfg_late(void)
{
	struct pci_mmcfg_region *cfg;
	int rc;

	if (!xen_initial_domain())
		return 0;

	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
		return 0;

	if (list_empty(&pci_mmcfg_list))
		return 0;

	/* Check whether they are in the right area. */
	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
		struct physdev_pci_mmcfg_reserved r;

		r.address = cfg->address;
		r.segment = cfg->segment;
		r.start_bus = cfg->start_bus;
		r.end_bus = cfg->end_bus;
		r.flags = XEN_PCI_MMCFG_RESERVED;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_mmcfg_reserved, &r);
		switch (rc) {
		case 0:
		case -ENOSYS:
			continue;

		default:
			pr_warn("Failed to report MMCONFIG reservation state for %s to hypervisor (%d)\n",
				cfg->name, rc);
		}
	}

	return 0;
}
#endif
#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
	domid_t domain;
	struct pci_dev *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_domain_list_spinlock);
static LIST_HEAD(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	list_for_each_entry(owner, &dev_domain_list, list) {
		if (owner->dev == dev)
			return owner;
	}

	return NULL;
}
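
/* Look up the owning domain of @dev; returns -ENODEV if none is recorded. */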
int xen_find_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;
	int domain = -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (owner)
		domain = owner->domain;
	spin_unlock(&dev_domain_list_spinlock);

	return domain;
}
EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
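
/*
 * Record @domain as the owner of @dev. Returns -EEXIST if the device already
 * has an owner and -ENODEV if the tracking structure cannot be allocated.
 */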
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
{
	struct xen_device_domain_owner *owner;

	owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
	if (!owner)
		return -ENODEV;

	spin_lock(&dev_domain_list_spinlock);
	if (find_device(dev)) {
		spin_unlock(&dev_domain_list_spinlock);
		kfree(owner);
		return -EEXIST;
	}
	owner->domain = domain;
	owner->dev = dev;
	list_add_tail(&owner->list, &dev_domain_list);
	spin_unlock(&dev_domain_list_spinlock);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
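
/* Drop the ownership record for @dev; returns -ENODEV if none exists. */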
int xen_unregister_device_domain_owner(struct pci_dev *dev)
{
	struct xen_device_domain_owner *owner;

	spin_lock(&dev_domain_list_spinlock);
	owner = find_device(dev);
	if (!owner) {
		spin_unlock(&dev_domain_list_spinlock);
		return -ENODEV;
	}
	list_del(&owner->list);
	spin_unlock(&dev_domain_list_spinlock);
	kfree(owner);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
#endif /* CONFIG_XEN_DOM0 */