// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
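 *
 * For example, the first function passed through typically shows up in the
 * guest as something like 0001:00:00.0: the domain ("Segment", here
 * illustratively 0001) is unique to that virtual bus, while the function
 * itself sits at device 0, function 0. These numbers are only an example,
 * not taken from a real system.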
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
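
/*
 * A quick worked example of the encoding above (values shown purely for
 * illustration):
 *
 *	PCI_MAKE_VERSION(1, 2)        == 0x00010002
 *	PCI_MAJOR_VERSION(0x00010002) == 1
 *	PCI_MINOR_VERSION(0x00010002) == 2
 */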

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
};

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

/*
 * Protocol version negotiated by hv_pci_protocol_negotiation().
 */
static enum pci_protocol_version_t pci_protocol_version;

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_MESSAGE_MAXIMUM
};
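
/*
 * For reference, two of the resulting message values, computed from
 * PCI_MESSAGE_BASE above (shown only as an illustration):
 *
 *	PCI_QUERY_RESOURCE_REQUIREMENTS == 0x42490005
 *	PCI_EJECT                       == 0x4249000B
 */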

/*
 * Structures defining the virtual PCI Express protocol.
 */

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[0];
};

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * pci_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[0];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[6];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = (4 * PAGE_SIZE);

/*
 * Definitions for the interrupt-steering hypercall.
 */
#define HV_PARTITION_ID_SELF		((u64)-1)
#define HVCALL_RETARGET_INTERRUPT	0x7e

struct hv_interrupt_entry {
	u32	source;			/* 1 for MSI(-X) */
	u32	reserved1;
	u32	address;
	u32	data;
};

#define HV_VP_SET_BANK_COUNT_MAX	5 /* current implementation limit */

struct hv_vp_set {
	u64	format;			/* 0 (HvGenericSetSparse4k) */
	u64	valid_banks;
	u64	masks[HV_VP_SET_BANK_COUNT_MAX];
};

/*
 * flags for hv_device_interrupt_target.flags
 */
#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST		1
#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET	2

struct hv_device_interrupt_target {
	u32	vector;
	u32	flags;
	union {
		u64		 vp_mask;
		struct hv_vp_set vp_set;
	};
};

struct retarget_msi_interrupt {
	u64	partition_id;		/* use "self" */
	u64	device_id;
	struct hv_interrupt_entry int_entry;
	u64	reserved2;
	struct hv_device_interrupt_target int_target;
} __packed;

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removed,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
	struct pci_sysdata sysdata;
	enum hv_pcibus_state state;
	atomic_t remove_lock;
	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct completion remove_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;	/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head resources_for_children;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct msi_controller msi_chip;
	struct irq_domain *irq_domain;

	/* hypercall arg, must not cross page boundary */
	struct retarget_msi_interrupt retarget_msi_interrupt_params;

	spinlock_t retarget_msi_interrupt_lock;

	struct workqueue_struct *wq;
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct pci_function_description func[0];
};

enum hv_pcichild_state {
	hv_pcichild_init = 0,
	hv_pcichild_requirements,
	hv_pcichild_resourced,
	hv_pcichild_ejecting,
	hv_pcichild_maximum
};

enum hv_pcidev_ref_reason {
	hv_pcidev_ref_invalid = 0,
	hv_pcidev_ref_initial,
	hv_pcidev_ref_by_slot,
	hv_pcidev_ref_packet,
	hv_pcidev_ref_pnp,
	hv_pcidev_ref_childlist,
	hv_pcidev_irqdata,
	hv_pcidev_ref_max
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	enum hv_pcichild_state state;
	struct pci_function_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[6];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	if (resp_packet_size >= offsetofend(struct pci_response, status))
		comp_pkt->completion_status = resp->status;
	else
		comp_pkt->completion_status = -1;

	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);
static void get_pcichild(struct hv_pci_dev *hv_pcidev,
			 enum hv_pcidev_ref_reason reason);
static void put_pcichild(struct hv_pci_dev *hv_pcidev,
			 enum hv_pcidev_ref_reason reason);

static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}
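
/*
 * A small worked example of the mapping above (illustrative values only):
 * Linux devfn 0x08 encodes device 1, function 0, so devfn_to_wslot(0x08)
 * yields a wslot with bits.dev == 1 and bits.func == 0; feeding that wslot
 * back through wslot_to_devfn() returns 0x08 again.
 */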

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
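
/*
 * In other words, a config space access boils down to the pattern below
 * (simplified sketch; the real helpers also take config_lock, insert memory
 * barriers, and pick readb/readw/readl based on the transfer size):
 *
 *	writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
 *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);
 */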
603 * _hv_pcifront_read_config() - Internal PCI config read
604 * @hpdev: The PCI driver's representation of the device
605 * @where: Offset within config space
606 * @size: Size of the transfer
607 * @val: Pointer to the buffer receiving the data
609 static void _hv_pcifront_read_config(struct hv_pci_dev
*hpdev
, int where
,
613 void __iomem
*addr
= hpdev
->hbus
->cfg_addr
+ CFG_PAGE_OFFSET
+ where
;
616 * If the attempt is to read the IDs or the ROM BAR, simulate that.
618 if (where
+ size
<= PCI_COMMAND
) {
619 memcpy(val
, ((u8
*)&hpdev
->desc
.v_id
) + where
, size
);
620 } else if (where
>= PCI_CLASS_REVISION
&& where
+ size
<=
621 PCI_CACHE_LINE_SIZE
) {
622 memcpy(val
, ((u8
*)&hpdev
->desc
.rev
) + where
-
623 PCI_CLASS_REVISION
, size
);
624 } else if (where
>= PCI_SUBSYSTEM_VENDOR_ID
&& where
+ size
<=
626 memcpy(val
, (u8
*)&hpdev
->desc
.subsystem_id
+ where
-
627 PCI_SUBSYSTEM_VENDOR_ID
, size
);
628 } else if (where
>= PCI_ROM_ADDRESS
&& where
+ size
<=
629 PCI_CAPABILITY_LIST
) {
630 /* ROM BARs are unimplemented */
632 } else if (where
>= PCI_INTERRUPT_LINE
&& where
+ size
<=
635 * Interrupt Line and Interrupt PIN are hard-wired to zero
636 * because this front-end only supports message-signaled
640 } else if (where
+ size
<= CFG_PAGE_SIZE
) {
641 spin_lock_irqsave(&hpdev
->hbus
->config_lock
, flags
);
642 /* Choose the function to be read. (See comment above) */
643 writel(hpdev
->desc
.win_slot
.slot
, hpdev
->hbus
->cfg_addr
);
644 /* Make sure the function was chosen before we start reading. */
646 /* Read from that function's config space. */
659 * Make sure the write was done before we release the spinlock
660 * allowing consecutive reads/writes.
663 spin_unlock_irqrestore(&hpdev
->hbus
->config_lock
, flags
);
665 dev_err(&hpdev
->hbus
->hdev
->device
,
666 "Attempt to read beyond a function's config space.\n");
670 static u16
hv_pcifront_get_vendor_id(struct hv_pci_dev
*hpdev
)
674 void __iomem
*addr
= hpdev
->hbus
->cfg_addr
+ CFG_PAGE_OFFSET
+
677 spin_lock_irqsave(&hpdev
->hbus
->config_lock
, flags
);
679 /* Choose the function to be read. (See comment above) */
680 writel(hpdev
->desc
.win_slot
.slot
, hpdev
->hbus
->cfg_addr
);
681 /* Make sure the function was chosen before we start reading. */
683 /* Read from that function's config space. */
686 * mb() is not required here, because the spin_unlock_irqrestore()
690 spin_unlock_irqrestore(&hpdev
->hbus
->config_lock
, flags
);
696 * _hv_pcifront_write_config() - Internal PCI config write
697 * @hpdev: The PCI driver's representation of the device
698 * @where: Offset within config space
699 * @size: Size of the transfer
700 * @val: The data being transferred
702 static void _hv_pcifront_write_config(struct hv_pci_dev
*hpdev
, int where
,
706 void __iomem
*addr
= hpdev
->hbus
->cfg_addr
+ CFG_PAGE_OFFSET
+ where
;
708 if (where
>= PCI_SUBSYSTEM_VENDOR_ID
&&
709 where
+ size
<= PCI_CAPABILITY_LIST
) {
710 /* SSIDs and ROM BARs are read-only */
711 } else if (where
>= PCI_COMMAND
&& where
+ size
<= CFG_PAGE_SIZE
) {
712 spin_lock_irqsave(&hpdev
->hbus
->config_lock
, flags
);
713 /* Choose the function to be written. (See comment above) */
714 writel(hpdev
->desc
.win_slot
.slot
, hpdev
->hbus
->cfg_addr
);
715 /* Make sure the function was chosen before we start writing. */
717 /* Write to that function's config space. */
730 * Make sure the write was done before we release the spinlock
731 * allowing consecutive reads/writes.
734 spin_unlock_irqrestore(&hpdev
->hbus
->config_lock
, flags
);
736 dev_err(&hpdev
->hbus
->hdev
->device
,
737 "Attempt to write beyond a function's config space.\n");
742 * hv_pcifront_read_config() - Read configuration space
743 * @bus: PCI Bus structure
744 * @devfn: Device/function
745 * @where: Offset from base
746 * @size: Byte/word/dword
747 * @val: Value to be read
749 * Return: PCIBIOS_SUCCESSFUL on success
750 * PCIBIOS_DEVICE_NOT_FOUND on failure
752 static int hv_pcifront_read_config(struct pci_bus
*bus
, unsigned int devfn
,
753 int where
, int size
, u32
*val
)
755 struct hv_pcibus_device
*hbus
=
756 container_of(bus
->sysdata
, struct hv_pcibus_device
, sysdata
);
757 struct hv_pci_dev
*hpdev
;
759 hpdev
= get_pcichild_wslot(hbus
, devfn_to_wslot(devfn
));
761 return PCIBIOS_DEVICE_NOT_FOUND
;
763 _hv_pcifront_read_config(hpdev
, where
, size
, val
);
765 put_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
766 return PCIBIOS_SUCCESSFUL
;
770 * hv_pcifront_write_config() - Write configuration space
771 * @bus: PCI Bus structure
772 * @devfn: Device/function
773 * @where: Offset from base
774 * @size: Byte/word/dword
775 * @val: Value to be written to device
777 * Return: PCIBIOS_SUCCESSFUL on success
778 * PCIBIOS_DEVICE_NOT_FOUND on failure
780 static int hv_pcifront_write_config(struct pci_bus
*bus
, unsigned int devfn
,
781 int where
, int size
, u32 val
)
783 struct hv_pcibus_device
*hbus
=
784 container_of(bus
->sysdata
, struct hv_pcibus_device
, sysdata
);
785 struct hv_pci_dev
*hpdev
;
787 hpdev
= get_pcichild_wslot(hbus
, devfn_to_wslot(devfn
));
789 return PCIBIOS_DEVICE_NOT_FOUND
;
791 _hv_pcifront_write_config(hpdev
, where
, size
, val
);
793 put_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
794 return PCIBIOS_SUCCESSFUL
;
797 /* PCIe operations */
798 static struct pci_ops hv_pcifront_ops
= {
799 .read
= hv_pcifront_read_config
,
800 .write
= hv_pcifront_write_config
,
803 /* Interrupt management hooks */
804 static void hv_int_desc_free(struct hv_pci_dev
*hpdev
,
805 struct tran_int_desc
*int_desc
)
807 struct pci_delete_interrupt
*int_pkt
;
809 struct pci_packet pkt
;
810 u8 buffer
[sizeof(struct pci_delete_interrupt
)];
813 memset(&ctxt
, 0, sizeof(ctxt
));
814 int_pkt
= (struct pci_delete_interrupt
*)&ctxt
.pkt
.message
;
815 int_pkt
->message_type
.type
=
816 PCI_DELETE_INTERRUPT_MESSAGE
;
817 int_pkt
->wslot
.slot
= hpdev
->desc
.win_slot
.slot
;
818 int_pkt
->int_desc
= *int_desc
;
819 vmbus_sendpacket(hpdev
->hbus
->hdev
->channel
, int_pkt
, sizeof(*int_pkt
),
820 (unsigned long)&ctxt
.pkt
, VM_PKT_DATA_INBAND
, 0);
825 * hv_msi_free() - Free the MSI.
826 * @domain: The interrupt domain pointer
827 * @info: Extra MSI-related context
828 * @irq: Identifies the IRQ.
830 * The Hyper-V parent partition and hypervisor are tracking the
831 * messages that are in use, keeping the interrupt redirection
832 * table up to date. This callback sends a message that frees
833 * the IRT entry and related tracking nonsense.
835 static void hv_msi_free(struct irq_domain
*domain
, struct msi_domain_info
*info
,
838 struct hv_pcibus_device
*hbus
;
839 struct hv_pci_dev
*hpdev
;
840 struct pci_dev
*pdev
;
841 struct tran_int_desc
*int_desc
;
842 struct irq_data
*irq_data
= irq_domain_get_irq_data(domain
, irq
);
843 struct msi_desc
*msi
= irq_data_get_msi_desc(irq_data
);
845 pdev
= msi_desc_to_pci_dev(msi
);
847 int_desc
= irq_data_get_irq_chip_data(irq_data
);
851 irq_data
->chip_data
= NULL
;
852 hpdev
= get_pcichild_wslot(hbus
, devfn_to_wslot(pdev
->devfn
));
858 hv_int_desc_free(hpdev
, int_desc
);
859 put_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
862 static int hv_set_affinity(struct irq_data
*data
, const struct cpumask
*dest
,
865 struct irq_data
*parent
= data
->parent_data
;
867 return parent
->chip
->irq_set_affinity(parent
, dest
, force
);
870 static void hv_irq_mask(struct irq_data
*data
)
872 pci_msi_mask_irq(data
);
876 * hv_irq_unmask() - "Unmask" the IRQ by setting its current
878 * @data: Describes the IRQ
 * Build a new destination for the MSI and make a hypercall to
881 * update the Interrupt Redirection Table. "Device Logical ID"
882 * is built out of this PCI bus's instance GUID and the function
883 * number of the device.
885 static void hv_irq_unmask(struct irq_data
*data
)
887 struct msi_desc
*msi_desc
= irq_data_get_msi_desc(data
);
888 struct irq_cfg
*cfg
= irqd_cfg(data
);
889 struct retarget_msi_interrupt
*params
;
890 struct hv_pcibus_device
*hbus
;
891 struct cpumask
*dest
;
892 struct pci_bus
*pbus
;
893 struct pci_dev
*pdev
;
900 dest
= irq_data_get_effective_affinity_mask(data
);
901 pdev
= msi_desc_to_pci_dev(msi_desc
);
903 hbus
= container_of(pbus
->sysdata
, struct hv_pcibus_device
, sysdata
);
905 spin_lock_irqsave(&hbus
->retarget_msi_interrupt_lock
, flags
);
907 params
= &hbus
->retarget_msi_interrupt_params
;
908 memset(params
, 0, sizeof(*params
));
909 params
->partition_id
= HV_PARTITION_ID_SELF
;
910 params
->int_entry
.source
= 1; /* MSI(-X) */
911 params
->int_entry
.address
= msi_desc
->msg
.address_lo
;
912 params
->int_entry
.data
= msi_desc
->msg
.data
;
913 params
->device_id
= (hbus
->hdev
->dev_instance
.b
[5] << 24) |
914 (hbus
->hdev
->dev_instance
.b
[4] << 16) |
915 (hbus
->hdev
->dev_instance
.b
[7] << 8) |
916 (hbus
->hdev
->dev_instance
.b
[6] & 0xf8) |
917 PCI_FUNC(pdev
->devfn
);
918 params
->int_target
.vector
= cfg
->vector
;
921 * Honoring apic->irq_delivery_mode set to dest_Fixed by
922 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
923 * spurious interrupt storm. Not doing so does not seem to have a
924 * negative effect (yet?).
927 if (pci_protocol_version
>= PCI_PROTOCOL_VERSION_1_2
) {
929 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
930 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
931 * with >64 VP support.
932 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
933 * is not sufficient for this hypercall.
935 params
->int_target
.flags
|=
936 HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET
;
937 params
->int_target
.vp_set
.valid_banks
=
938 (1ull << HV_VP_SET_BANK_COUNT_MAX
) - 1;
941 * var-sized hypercall, var-size starts after vp_mask (thus
942 * vp_set.format does not count, but vp_set.valid_banks does).
944 var_size
= 1 + HV_VP_SET_BANK_COUNT_MAX
;
946 for_each_cpu_and(cpu
, dest
, cpu_online_mask
) {
947 cpu_vmbus
= hv_cpu_number_to_vp_number(cpu
);
949 if (cpu_vmbus
>= HV_VP_SET_BANK_COUNT_MAX
* 64) {
950 dev_err(&hbus
->hdev
->device
,
951 "too high CPU %d", cpu_vmbus
);
956 params
->int_target
.vp_set
.masks
[cpu_vmbus
/ 64] |=
957 (1ULL << (cpu_vmbus
& 63));
960 for_each_cpu_and(cpu
, dest
, cpu_online_mask
) {
961 params
->int_target
.vp_mask
|=
962 (1ULL << hv_cpu_number_to_vp_number(cpu
));
966 res
= hv_do_hypercall(HVCALL_RETARGET_INTERRUPT
| (var_size
<< 17),
970 spin_unlock_irqrestore(&hbus
->retarget_msi_interrupt_lock
, flags
);
973 dev_err(&hbus
->hdev
->device
,
974 "%s() failed: %#llx", __func__
, res
);
978 pci_msi_unmask_irq(data
);
981 struct compose_comp_ctxt
{
982 struct hv_pci_compl comp_pkt
;
983 struct tran_int_desc int_desc
;
986 static void hv_pci_compose_compl(void *context
, struct pci_response
*resp
,
987 int resp_packet_size
)
989 struct compose_comp_ctxt
*comp_pkt
= context
;
990 struct pci_create_int_response
*int_resp
=
991 (struct pci_create_int_response
*)resp
;
993 comp_pkt
->comp_pkt
.completion_status
= resp
->status
;
994 comp_pkt
->int_desc
= int_resp
->int_desc
;
995 complete(&comp_pkt
->comp_pkt
.host_event
);
998 static u32
hv_compose_msi_req_v1(
999 struct pci_create_interrupt
*int_pkt
, struct cpumask
*affinity
,
1000 u32 slot
, u8 vector
)
1002 int_pkt
->message_type
.type
= PCI_CREATE_INTERRUPT_MESSAGE
;
1003 int_pkt
->wslot
.slot
= slot
;
1004 int_pkt
->int_desc
.vector
= vector
;
1005 int_pkt
->int_desc
.vector_count
= 1;
1006 int_pkt
->int_desc
.delivery_mode
= dest_Fixed
;
1009 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1012 int_pkt
->int_desc
.cpu_mask
= CPU_AFFINITY_ALL
;
1014 return sizeof(*int_pkt
);
1017 static u32
hv_compose_msi_req_v2(
1018 struct pci_create_interrupt2
*int_pkt
, struct cpumask
*affinity
,
1019 u32 slot
, u8 vector
)
1023 int_pkt
->message_type
.type
= PCI_CREATE_INTERRUPT_MESSAGE2
;
1024 int_pkt
->wslot
.slot
= slot
;
1025 int_pkt
->int_desc
.vector
= vector
;
1026 int_pkt
->int_desc
.vector_count
= 1;
1027 int_pkt
->int_desc
.delivery_mode
= dest_Fixed
;
1030 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1031 * by subsequent retarget in hv_irq_unmask().
1033 cpu
= cpumask_first_and(affinity
, cpu_online_mask
);
1034 int_pkt
->int_desc
.processor_array
[0] =
1035 hv_cpu_number_to_vp_number(cpu
);
1036 int_pkt
->int_desc
.processor_count
= 1;
1038 return sizeof(*int_pkt
);
1042 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1043 * @data: Everything about this MSI
1044 * @msg: Buffer that is filled in by this function
1046 * This function unpacks the IRQ looking for target CPU set, IDT
1047 * vector and mode and sends a message to the parent partition
1048 * asking for a mapping for that tuple in this partition. The
1049 * response supplies a data value and address to which that data
1050 * should be written to trigger that interrupt.
1052 static void hv_compose_msi_msg(struct irq_data
*data
, struct msi_msg
*msg
)
1054 struct irq_cfg
*cfg
= irqd_cfg(data
);
1055 struct hv_pcibus_device
*hbus
;
1056 struct hv_pci_dev
*hpdev
;
1057 struct pci_bus
*pbus
;
1058 struct pci_dev
*pdev
;
1059 struct cpumask
*dest
;
1060 struct compose_comp_ctxt comp
;
1061 struct tran_int_desc
*int_desc
;
1063 struct pci_packet pci_pkt
;
1065 struct pci_create_interrupt v1
;
1066 struct pci_create_interrupt2 v2
;
1073 pdev
= msi_desc_to_pci_dev(irq_data_get_msi_desc(data
));
1074 dest
= irq_data_get_effective_affinity_mask(data
);
1076 hbus
= container_of(pbus
->sysdata
, struct hv_pcibus_device
, sysdata
);
1077 hpdev
= get_pcichild_wslot(hbus
, devfn_to_wslot(pdev
->devfn
));
1079 goto return_null_message
;
1081 /* Free any previous message that might have already been composed. */
1082 if (data
->chip_data
) {
1083 int_desc
= data
->chip_data
;
1084 data
->chip_data
= NULL
;
1085 hv_int_desc_free(hpdev
, int_desc
);
1088 int_desc
= kzalloc(sizeof(*int_desc
), GFP_ATOMIC
);
1090 goto drop_reference
;
1092 memset(&ctxt
, 0, sizeof(ctxt
));
1093 init_completion(&comp
.comp_pkt
.host_event
);
1094 ctxt
.pci_pkt
.completion_func
= hv_pci_compose_compl
;
1095 ctxt
.pci_pkt
.compl_ctxt
= &comp
;
1097 switch (pci_protocol_version
) {
1098 case PCI_PROTOCOL_VERSION_1_1
:
1099 size
= hv_compose_msi_req_v1(&ctxt
.int_pkts
.v1
,
1101 hpdev
->desc
.win_slot
.slot
,
1105 case PCI_PROTOCOL_VERSION_1_2
:
1106 size
= hv_compose_msi_req_v2(&ctxt
.int_pkts
.v2
,
1108 hpdev
->desc
.win_slot
.slot
,
	/*
	 * As we only negotiate protocol versions known to this driver,
	 * this path should never be hit. However, this is not a hot path,
	 * so we print a message to aid future updates.
	 */
1117 dev_err(&hbus
->hdev
->device
,
1118 "Unexpected vPCI protocol, update driver.");
1122 ret
= vmbus_sendpacket(hpdev
->hbus
->hdev
->channel
, &ctxt
.int_pkts
,
1123 size
, (unsigned long)&ctxt
.pci_pkt
,
1125 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
1127 dev_err(&hbus
->hdev
->device
,
1128 "Sending request for interrupt failed: 0x%x",
1129 comp
.comp_pkt
.completion_status
);
1134 * Since this function is called with IRQ locks held, can't
1135 * do normal wait for completion; instead poll.
1137 while (!try_wait_for_completion(&comp
.comp_pkt
.host_event
)) {
1138 /* 0xFFFF means an invalid PCI VENDOR ID. */
1139 if (hv_pcifront_get_vendor_id(hpdev
) == 0xFFFF) {
1140 dev_err_once(&hbus
->hdev
->device
,
1141 "the device has gone\n");
1146 * When the higher level interrupt code calls us with
1147 * interrupt disabled, we must poll the channel by calling
1148 * the channel callback directly when channel->target_cpu is
1149 * the current CPU. When the higher level interrupt code
1150 * calls us with interrupt enabled, let's add the
1151 * local_bh_disable()/enable() to avoid race.
1155 if (hbus
->hdev
->channel
->target_cpu
== smp_processor_id())
1156 hv_pci_onchannelcallback(hbus
);
1160 if (hpdev
->state
== hv_pcichild_ejecting
) {
1161 dev_err_once(&hbus
->hdev
->device
,
1162 "the device is being ejected\n");
1169 if (comp
.comp_pkt
.completion_status
< 0) {
1170 dev_err(&hbus
->hdev
->device
,
1171 "Request for interrupt failed: 0x%x",
1172 comp
.comp_pkt
.completion_status
);
1177 * Record the assignment so that this can be unwound later. Using
1178 * irq_set_chip_data() here would be appropriate, but the lock it takes
1181 *int_desc
= comp
.int_desc
;
1182 data
->chip_data
= int_desc
;
1184 /* Pass up the result. */
1185 msg
->address_hi
= comp
.int_desc
.address
>> 32;
1186 msg
->address_lo
= comp
.int_desc
.address
& 0xffffffff;
1187 msg
->data
= comp
.int_desc
.data
;
1189 put_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
1195 put_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
1196 return_null_message
:
1197 msg
->address_hi
= 0;
1198 msg
->address_lo
= 0;
1202 /* HW Interrupt Chip Descriptor */
1203 static struct irq_chip hv_msi_irq_chip
= {
1204 .name
= "Hyper-V PCIe MSI",
1205 .irq_compose_msi_msg
= hv_compose_msi_msg
,
1206 .irq_set_affinity
= hv_set_affinity
,
1207 .irq_ack
= irq_chip_ack_parent
,
1208 .irq_mask
= hv_irq_mask
,
1209 .irq_unmask
= hv_irq_unmask
,
1212 static irq_hw_number_t
hv_msi_domain_ops_get_hwirq(struct msi_domain_info
*info
,
1213 msi_alloc_info_t
*arg
)
1215 return arg
->msi_hwirq
;
1218 static struct msi_domain_ops hv_msi_ops
= {
1219 .get_hwirq
= hv_msi_domain_ops_get_hwirq
,
1220 .msi_prepare
= pci_msi_prepare
,
1221 .set_desc
= pci_msi_set_desc
,
1222 .msi_free
= hv_msi_free
,
1226 * hv_pcie_init_irq_domain() - Initialize IRQ domain
1227 * @hbus: The root PCI bus
1229 * This function creates an IRQ domain which will be used for
1230 * interrupts from devices that have been passed through. These
1231 * devices only support MSI and MSI-X, not line-based interrupts
1232 * or simulations of line-based interrupts through PCIe's
1233 * fabric-layer messages. Because interrupts are remapped, we
1234 * can support multi-message MSI here.
1236 * Return: '0' on success and error value on failure
1238 static int hv_pcie_init_irq_domain(struct hv_pcibus_device
*hbus
)
1240 hbus
->msi_info
.chip
= &hv_msi_irq_chip
;
1241 hbus
->msi_info
.ops
= &hv_msi_ops
;
1242 hbus
->msi_info
.flags
= (MSI_FLAG_USE_DEF_DOM_OPS
|
1243 MSI_FLAG_USE_DEF_CHIP_OPS
| MSI_FLAG_MULTI_PCI_MSI
|
1245 hbus
->msi_info
.handler
= handle_edge_irq
;
1246 hbus
->msi_info
.handler_name
= "edge";
1247 hbus
->msi_info
.data
= hbus
;
1248 hbus
->irq_domain
= pci_msi_create_irq_domain(hbus
->sysdata
.fwnode
,
1251 if (!hbus
->irq_domain
) {
1252 dev_err(&hbus
->hdev
->device
,
1253 "Failed to build an MSI IRQ domain\n");
1261 * get_bar_size() - Get the address space consumed by a BAR
1262 * @bar_val: Value that a BAR returned after -1 was written
1265 * This function returns the size of the BAR, rounded up to 1
1266 * page. It has to be rounded up because the hypervisor's page
1267 * table entry that maps the BAR into the VM can't specify an
1268 * offset within a page. The invariant is that the hypervisor
1269 * must place any BARs of smaller than page length at the
1270 * beginning of a page.
1272 * Return: Size in bytes of the consumed MMIO space.
1274 static u64
get_bar_size(u64 bar_val
)
1276 return round_up((1 + ~(bar_val
& PCI_BASE_ADDRESS_MEM_MASK
)),
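
/*
 * Example (illustrative numbers): a 16KB, 64-bit BAR that reads back as
 * 0xffffffffffffc00c after all ones were written is masked with
 * PCI_BASE_ADDRESS_MEM_MASK to 0xffffffffffffc000; one plus its complement
 * gives 0x4000 (16KB), which round_up() leaves unchanged since it is
 * already a multiple of the page size.
 */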
1281 * survey_child_resources() - Total all MMIO requirements
1282 * @hbus: Root PCI bus, as understood by this driver
1284 static void survey_child_resources(struct hv_pcibus_device
*hbus
)
1286 struct list_head
*iter
;
1287 struct hv_pci_dev
*hpdev
;
1288 resource_size_t bar_size
= 0;
1289 unsigned long flags
;
1290 struct completion
*event
;
1294 /* If nobody is waiting on the answer, don't compute it. */
1295 event
= xchg(&hbus
->survey_event
, NULL
);
1299 /* If the answer has already been computed, go with it. */
1300 if (hbus
->low_mmio_space
|| hbus
->high_mmio_space
) {
1305 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1308 * Due to an interesting quirk of the PCI spec, all memory regions
1309 * for a child device are a power of 2 in size and aligned in memory,
1310 * so it's sufficient to just add them up without tracking alignment.
1312 list_for_each(iter
, &hbus
->children
) {
1313 hpdev
= container_of(iter
, struct hv_pci_dev
, list_entry
);
1314 for (i
= 0; i
< 6; i
++) {
1315 if (hpdev
->probed_bar
[i
] & PCI_BASE_ADDRESS_SPACE_IO
)
1316 dev_err(&hbus
->hdev
->device
,
1317 "There's an I/O BAR in this list!\n");
1319 if (hpdev
->probed_bar
[i
] != 0) {
1321 * A probed BAR has all the upper bits set that
1325 bar_val
= hpdev
->probed_bar
[i
];
1326 if (bar_val
& PCI_BASE_ADDRESS_MEM_TYPE_64
)
1328 ((u64
)hpdev
->probed_bar
[++i
] << 32);
1330 bar_val
|= 0xffffffff00000000ULL
;
1332 bar_size
= get_bar_size(bar_val
);
1334 if (bar_val
& PCI_BASE_ADDRESS_MEM_TYPE_64
)
1335 hbus
->high_mmio_space
+= bar_size
;
1337 hbus
->low_mmio_space
+= bar_size
;
1342 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1347 * prepopulate_bars() - Fill in BARs with defaults
1348 * @hbus: Root PCI bus, as understood by this driver
1350 * The core PCI driver code seems much, much happier if the BARs
1351 * for a device have values upon first scan. So fill them in.
1352 * The algorithm below works down from large sizes to small,
1353 * attempting to pack the assignments optimally. The assumption,
1354 * enforced in other parts of the code, is that the beginning of
1355 * the memory-mapped I/O space will be aligned on the largest
1358 static void prepopulate_bars(struct hv_pcibus_device
*hbus
)
1360 resource_size_t high_size
= 0;
1361 resource_size_t low_size
= 0;
1362 resource_size_t high_base
= 0;
1363 resource_size_t low_base
= 0;
1364 resource_size_t bar_size
;
1365 struct hv_pci_dev
*hpdev
;
1366 struct list_head
*iter
;
1367 unsigned long flags
;
1373 if (hbus
->low_mmio_space
) {
1374 low_size
= 1ULL << (63 - __builtin_clzll(hbus
->low_mmio_space
));
1375 low_base
= hbus
->low_mmio_res
->start
;
1378 if (hbus
->high_mmio_space
) {
1380 (63 - __builtin_clzll(hbus
->high_mmio_space
));
1381 high_base
= hbus
->high_mmio_res
->start
;
1384 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1386 /* Pick addresses for the BARs. */
1388 list_for_each(iter
, &hbus
->children
) {
1389 hpdev
= container_of(iter
, struct hv_pci_dev
,
1391 for (i
= 0; i
< 6; i
++) {
1392 bar_val
= hpdev
->probed_bar
[i
];
1395 high
= bar_val
& PCI_BASE_ADDRESS_MEM_TYPE_64
;
1398 ((u64
)hpdev
->probed_bar
[i
+ 1]
1401 bar_val
|= 0xffffffffULL
<< 32;
1403 bar_size
= get_bar_size(bar_val
);
1405 if (high_size
!= bar_size
) {
1409 _hv_pcifront_write_config(hpdev
,
1410 PCI_BASE_ADDRESS_0
+ (4 * i
),
1412 (u32
)(high_base
& 0xffffff00));
1414 _hv_pcifront_write_config(hpdev
,
1415 PCI_BASE_ADDRESS_0
+ (4 * i
),
1416 4, (u32
)(high_base
>> 32));
1417 high_base
+= bar_size
;
1419 if (low_size
!= bar_size
)
1421 _hv_pcifront_write_config(hpdev
,
1422 PCI_BASE_ADDRESS_0
+ (4 * i
),
1424 (u32
)(low_base
& 0xffffff00));
1425 low_base
+= bar_size
;
1428 if (high_size
<= 1 && low_size
<= 1) {
1429 /* Set the memory enable bit. */
1430 _hv_pcifront_read_config(hpdev
, PCI_COMMAND
, 2,
1432 command
|= PCI_COMMAND_MEMORY
;
1433 _hv_pcifront_write_config(hpdev
, PCI_COMMAND
, 2,
1441 } while (high_size
|| low_size
);
1443 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1447 * create_root_hv_pci_bus() - Expose a new root PCI bus
1448 * @hbus: Root PCI bus, as understood by this driver
1450 * Return: 0 on success, -errno on failure
1452 static int create_root_hv_pci_bus(struct hv_pcibus_device
*hbus
)
1454 /* Register the device */
1455 hbus
->pci_bus
= pci_create_root_bus(&hbus
->hdev
->device
,
1456 0, /* bus number is always zero */
1459 &hbus
->resources_for_children
);
1463 hbus
->pci_bus
->msi
= &hbus
->msi_chip
;
1464 hbus
->pci_bus
->msi
->dev
= &hbus
->hdev
->device
;
1466 pci_lock_rescan_remove();
1467 pci_scan_child_bus(hbus
->pci_bus
);
1468 pci_bus_assign_resources(hbus
->pci_bus
);
1469 pci_bus_add_devices(hbus
->pci_bus
);
1470 pci_unlock_rescan_remove();
1471 hbus
->state
= hv_pcibus_installed
;
1475 struct q_res_req_compl
{
1476 struct completion host_event
;
1477 struct hv_pci_dev
*hpdev
;
1481 * q_resource_requirements() - Query Resource Requirements
1482 * @context: The completion context.
1483 * @resp: The response that came from the host.
1484 * @resp_packet_size: The size in bytes of resp.
1486 * This function is invoked on completion of a Query Resource
1487 * Requirements packet.
1489 static void q_resource_requirements(void *context
, struct pci_response
*resp
,
1490 int resp_packet_size
)
1492 struct q_res_req_compl
*completion
= context
;
1493 struct pci_q_res_req_response
*q_res_req
=
1494 (struct pci_q_res_req_response
*)resp
;
1497 if (resp
->status
< 0) {
1498 dev_err(&completion
->hpdev
->hbus
->hdev
->device
,
1499 "query resource requirements failed: %x\n",
1502 for (i
= 0; i
< 6; i
++) {
1503 completion
->hpdev
->probed_bar
[i
] =
1504 q_res_req
->probed_bar
[i
];
1508 complete(&completion
->host_event
);
1511 static void get_pcichild(struct hv_pci_dev
*hpdev
,
1512 enum hv_pcidev_ref_reason reason
)
1514 refcount_inc(&hpdev
->refs
);
1517 static void put_pcichild(struct hv_pci_dev
*hpdev
,
1518 enum hv_pcidev_ref_reason reason
)
1520 if (refcount_dec_and_test(&hpdev
->refs
))
1525 * new_pcichild_device() - Create a new child device
1526 * @hbus: The internal struct tracking this root PCI bus.
1527 * @desc: The information supplied so far from the host
1530 * This function creates the tracking structure for a new child
1531 * device and kicks off the process of figuring out what it is.
1533 * Return: Pointer to the new tracking struct
1535 static struct hv_pci_dev
*new_pcichild_device(struct hv_pcibus_device
*hbus
,
1536 struct pci_function_description
*desc
)
1538 struct hv_pci_dev
*hpdev
;
1539 struct pci_child_message
*res_req
;
1540 struct q_res_req_compl comp_pkt
;
1542 struct pci_packet init_packet
;
1543 u8 buffer
[sizeof(struct pci_child_message
)];
1545 unsigned long flags
;
1548 hpdev
= kzalloc(sizeof(*hpdev
), GFP_ATOMIC
);
1554 memset(&pkt
, 0, sizeof(pkt
));
1555 init_completion(&comp_pkt
.host_event
);
1556 comp_pkt
.hpdev
= hpdev
;
1557 pkt
.init_packet
.compl_ctxt
= &comp_pkt
;
1558 pkt
.init_packet
.completion_func
= q_resource_requirements
;
1559 res_req
= (struct pci_child_message
*)&pkt
.init_packet
.message
;
1560 res_req
->message_type
.type
= PCI_QUERY_RESOURCE_REQUIREMENTS
;
1561 res_req
->wslot
.slot
= desc
->win_slot
.slot
;
1563 ret
= vmbus_sendpacket(hbus
->hdev
->channel
, res_req
,
1564 sizeof(struct pci_child_message
),
1565 (unsigned long)&pkt
.init_packet
,
1567 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
1571 wait_for_completion(&comp_pkt
.host_event
);
1573 hpdev
->desc
= *desc
;
1574 refcount_set(&hpdev
->refs
, 1);
1575 get_pcichild(hpdev
, hv_pcidev_ref_childlist
);
1576 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1579 * When a device is being added to the bus, we set the PCI domain
1580 * number to be the device serial number, which is non-zero and
1581 * unique on the same VM. The serial numbers start with 1, and
1582 * increase by 1 for each device. So device names including this
1583 * can have shorter names than based on the bus instance UUID.
1584 * Only the first device serial number is used for domain, so the
1585 * domain number will not change after the first device is added.
1587 if (list_empty(&hbus
->children
))
1588 hbus
->sysdata
.domain
= desc
->ser
;
1589 list_add_tail(&hpdev
->list_entry
, &hbus
->children
);
1590 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1599 * get_pcichild_wslot() - Find device from slot
1600 * @hbus: Root PCI bus, as understood by this driver
1601 * @wslot: Location on the bus
1603 * This function looks up a PCI device and returns the internal
1604 * representation of it. It acquires a reference on it, so that
1605 * the device won't be deleted while somebody is using it. The
1606 * caller is responsible for calling put_pcichild() to release
1609 * Return: Internal representation of a PCI device
1611 static struct hv_pci_dev
*get_pcichild_wslot(struct hv_pcibus_device
*hbus
,
1614 unsigned long flags
;
1615 struct hv_pci_dev
*iter
, *hpdev
= NULL
;
1617 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1618 list_for_each_entry(iter
, &hbus
->children
, list_entry
) {
1619 if (iter
->desc
.win_slot
.slot
== wslot
) {
1621 get_pcichild(hpdev
, hv_pcidev_ref_by_slot
);
1625 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1631 * pci_devices_present_work() - Handle new list of child devices
1632 * @work: Work struct embedded in struct hv_dr_work
1634 * "Bus Relations" is the Windows term for "children of this
1635 * bus." The terminology is preserved here for people trying to
1636 * debug the interaction between Hyper-V and Linux. This
1637 * function is called when the parent partition reports a list
1638 * of functions that should be observed under this PCI Express
1641 * This function updates the list, and must tolerate being
1642 * called multiple times with the same information. The typical
1643 * number of child devices is one, with very atypical cases
1644 * involving three or four, so the algorithms used here can be
1645 * simple and inefficient.
1647 * It must also treat the omission of a previously observed device as
1648 * notification that the device no longer exists.
1650 * Note that this function is serialized with hv_eject_device_work(),
1651 * because both are pushed to the ordered workqueue hbus->wq.
1653 static void pci_devices_present_work(struct work_struct
*work
)
1657 struct list_head
*iter
;
1658 struct pci_function_description
*new_desc
;
1659 struct hv_pci_dev
*hpdev
;
1660 struct hv_pcibus_device
*hbus
;
1661 struct list_head removed
;
1662 struct hv_dr_work
*dr_wrk
;
1663 struct hv_dr_state
*dr
= NULL
;
1664 unsigned long flags
;
1666 dr_wrk
= container_of(work
, struct hv_dr_work
, wrk
);
1670 INIT_LIST_HEAD(&removed
);
1672 /* Pull this off the queue and process it if it was the last one. */
1673 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1674 while (!list_empty(&hbus
->dr_list
)) {
1675 dr
= list_first_entry(&hbus
->dr_list
, struct hv_dr_state
,
1677 list_del(&dr
->list_entry
);
1679 /* Throw this away if the list still has stuff in it. */
1680 if (!list_empty(&hbus
->dr_list
)) {
1685 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1692 /* First, mark all existing children as reported missing. */
1693 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1694 list_for_each(iter
, &hbus
->children
) {
1695 hpdev
= container_of(iter
, struct hv_pci_dev
,
1697 hpdev
->reported_missing
= true;
1699 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1701 /* Next, add back any reported devices. */
1702 for (child_no
= 0; child_no
< dr
->device_count
; child_no
++) {
1704 new_desc
= &dr
->func
[child_no
];
1706 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1707 list_for_each(iter
, &hbus
->children
) {
1708 hpdev
= container_of(iter
, struct hv_pci_dev
,
1710 if ((hpdev
->desc
.win_slot
.slot
==
1711 new_desc
->win_slot
.slot
) &&
1712 (hpdev
->desc
.v_id
== new_desc
->v_id
) &&
1713 (hpdev
->desc
.d_id
== new_desc
->d_id
) &&
1714 (hpdev
->desc
.ser
== new_desc
->ser
)) {
1715 hpdev
->reported_missing
= false;
1719 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1722 hpdev
= new_pcichild_device(hbus
, new_desc
);
1724 dev_err(&hbus
->hdev
->device
,
1725 "couldn't record a child device.\n");
1729 /* Move missing children to a list on the stack. */
1730 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1733 list_for_each(iter
, &hbus
->children
) {
1734 hpdev
= container_of(iter
, struct hv_pci_dev
,
1736 if (hpdev
->reported_missing
) {
1738 put_pcichild(hpdev
, hv_pcidev_ref_childlist
);
1739 list_move_tail(&hpdev
->list_entry
, &removed
);
1744 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1746 /* Delete everything that should no longer exist. */
1747 while (!list_empty(&removed
)) {
1748 hpdev
= list_first_entry(&removed
, struct hv_pci_dev
,
1750 list_del(&hpdev
->list_entry
);
1751 put_pcichild(hpdev
, hv_pcidev_ref_initial
);
1754 switch (hbus
->state
) {
1755 case hv_pcibus_installed
:
1757 * Tell the core to rescan bus
1758 * because there may have been changes.
1760 pci_lock_rescan_remove();
1761 pci_scan_child_bus(hbus
->pci_bus
);
1762 pci_unlock_rescan_remove();
1765 case hv_pcibus_init
:
1766 case hv_pcibus_probed
:
1767 survey_child_resources(hbus
);
1779 * hv_pci_devices_present() - Handles list of new children
1780 * @hbus: Root PCI bus, as understood by this driver
1781 * @relations: Packet from host listing children
1783 * This function is invoked whenever a new list of devices for
1786 static void hv_pci_devices_present(struct hv_pcibus_device
*hbus
,
1787 struct pci_bus_relations
*relations
)
1789 struct hv_dr_state
*dr
;
1790 struct hv_dr_work
*dr_wrk
;
1791 unsigned long flags
;
1793 dr_wrk
= kzalloc(sizeof(*dr_wrk
), GFP_NOWAIT
);
1797 dr
= kzalloc(offsetof(struct hv_dr_state
, func
) +
1798 (sizeof(struct pci_function_description
) *
1799 (relations
->device_count
)), GFP_NOWAIT
);
1805 INIT_WORK(&dr_wrk
->wrk
, pci_devices_present_work
);
1807 dr
->device_count
= relations
->device_count
;
1808 if (dr
->device_count
!= 0) {
1809 memcpy(dr
->func
, relations
->func
,
1810 sizeof(struct pci_function_description
) *
1814 spin_lock_irqsave(&hbus
->device_list_lock
, flags
);
1815 list_add_tail(&dr
->list_entry
, &hbus
->dr_list
);
1816 spin_unlock_irqrestore(&hbus
->device_list_lock
, flags
);
1819 queue_work(hbus
->wq
, &dr_wrk
->wrk
);
1823 * hv_eject_device_work() - Asynchronously handles ejection
1824 * @work: Work struct embedded in internal device struct
1826 * This function handles ejecting a device. Windows will
1827 * attempt to gracefully eject a device, waiting 60 seconds to
1828 * hear back from the guest OS that this completed successfully.
1829 * If this timer expires, the device will be forcibly removed.
1831 static void hv_eject_device_work(struct work_struct
*work
)
1833 struct pci_eject_response
*ejct_pkt
;
1834 struct hv_pci_dev
*hpdev
;
1835 struct pci_dev
*pdev
;
1836 unsigned long flags
;
1839 struct pci_packet pkt
;
1840 u8 buffer
[sizeof(struct pci_eject_response
)];
1843 hpdev
= container_of(work
, struct hv_pci_dev
, wrk
);
1845 if (hpdev
->state
!= hv_pcichild_ejecting
) {
1846 put_pcichild(hpdev
, hv_pcidev_ref_pnp
);
1851 * Ejection can come before or after the PCI bus has been set up, so
1852 * attempt to find it and tear down the bus state, if it exists. This
1853 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
1854 * because hbus->pci_bus may not exist yet.
1856 wslot
= wslot_to_devfn(hpdev
->desc
.win_slot
.slot
);
1857 pdev
= pci_get_domain_bus_and_slot(hpdev
->hbus
->sysdata
.domain
, 0,
1860 pci_lock_rescan_remove();
1861 pci_stop_and_remove_bus_device(pdev
);
1863 pci_unlock_rescan_remove();
1866 spin_lock_irqsave(&hpdev
->hbus
->device_list_lock
, flags
);
1867 list_del(&hpdev
->list_entry
);
1868 spin_unlock_irqrestore(&hpdev
->hbus
->device_list_lock
, flags
);
1870 memset(&ctxt
, 0, sizeof(ctxt
));
1871 ejct_pkt
= (struct pci_eject_response
*)&ctxt
.pkt
.message
;
1872 ejct_pkt
->message_type
.type
= PCI_EJECTION_COMPLETE
;
1873 ejct_pkt
->wslot
.slot
= hpdev
->desc
.win_slot
.slot
;
1874 vmbus_sendpacket(hpdev
->hbus
->hdev
->channel
, ejct_pkt
,
1875 sizeof(*ejct_pkt
), (unsigned long)&ctxt
.pkt
,
1876 VM_PKT_DATA_INBAND
, 0);
1878 put_pcichild(hpdev
, hv_pcidev_ref_childlist
);
1879 put_pcichild(hpdev
, hv_pcidev_ref_pnp
);
1880 put_hvpcibus(hpdev
->hbus
);
1884 * hv_pci_eject_device() - Handles device ejection
1885 * @hpdev: Internal device tracking struct
1887 * This function is invoked when an ejection packet arrives. It
1888 * just schedules work so that we don't re-enter the packet
1889 * delivery code handling the ejection.
1891 static void hv_pci_eject_device(struct hv_pci_dev
*hpdev
)
1893 hpdev
->state
= hv_pcichild_ejecting
;
1894 get_pcichild(hpdev
, hv_pcidev_ref_pnp
);
1895 INIT_WORK(&hpdev
->wrk
, hv_eject_device_work
);
1896 get_hvpcibus(hpdev
->hbus
);
1897 queue_work(hpdev
->hbus
->wq
, &hpdev
->wrk
);
1901 * hv_pci_onchannelcallback() - Handles incoming packets
1902 * @context: Internal bus tracking struct
1904 * This function is invoked whenever the host sends a packet to
1905 * this channel (which is private to this root PCI bus).
1907 static void hv_pci_onchannelcallback(void *context
)
1909 const int packet_size
= 0x100;
1911 struct hv_pcibus_device
*hbus
= context
;
1914 struct vmpacket_descriptor
*desc
;
1915 unsigned char *buffer
;
1916 int bufferlen
= packet_size
;
1917 struct pci_packet
*comp_packet
;
1918 struct pci_response
*response
;
1919 struct pci_incoming_message
*new_message
;
1920 struct pci_bus_relations
*bus_rel
;
1921 struct pci_dev_incoming
*dev_message
;
1922 struct hv_pci_dev
*hpdev
;
1924 buffer
= kmalloc(bufferlen
, GFP_ATOMIC
);
1929 ret
= vmbus_recvpacket_raw(hbus
->hdev
->channel
, buffer
,
1930 bufferlen
, &bytes_recvd
, &req_id
);
1932 if (ret
== -ENOBUFS
) {
1934 /* Handle large packet */
1935 bufferlen
= bytes_recvd
;
1936 buffer
= kmalloc(bytes_recvd
, GFP_ATOMIC
);
1942 /* Zero length indicates there are no more packets. */
1943 if (ret
|| !bytes_recvd
)
1947 * All incoming packets must be at least as large as a
1950 if (bytes_recvd
<= sizeof(struct pci_response
))
1952 desc
= (struct vmpacket_descriptor
*)buffer
;
1954 switch (desc
->type
) {
1958 * The host is trusted, and thus it's safe to interpret
1959 * this transaction ID as a pointer.
1961 comp_packet
= (struct pci_packet
*)req_id
;
1962 response
= (struct pci_response
*)buffer
;
1963 comp_packet
->completion_func(comp_packet
->compl_ctxt
,
1968 case VM_PKT_DATA_INBAND
:
1970 new_message
= (struct pci_incoming_message
*)buffer
;
1971 switch (new_message
->message_type
.type
) {
1972 case PCI_BUS_RELATIONS
:
1974 bus_rel
= (struct pci_bus_relations
*)buffer
;
1976 offsetof(struct pci_bus_relations
, func
) +
1977 (sizeof(struct pci_function_description
) *
1978 (bus_rel
->device_count
))) {
1979 dev_err(&hbus
->hdev
->device
,
1980 "bus relations too small\n");
1984 hv_pci_devices_present(hbus
, bus_rel
);
1989 dev_message
= (struct pci_dev_incoming
*)buffer
;
1990 hpdev
= get_pcichild_wslot(hbus
,
1991 dev_message
->wslot
.slot
);
1993 hv_pci_eject_device(hpdev
);
1995 hv_pcidev_ref_by_slot
);
2000 dev_warn(&hbus
->hdev
->device
,
2001 "Unimplemented protocol message %x\n",
2002 new_message
->message_type
.type
);
2008 dev_err(&hbus
->hdev
->device
,
2009 "unhandled packet type %d, tid %llx len %d\n",
2010 desc
->type
, req_id
, bytes_recvd
);
2019 * hv_pci_protocol_negotiation() - Set up protocol
2020 * @hdev: VMBus's tracking struct for this root PCI bus
2022 * This driver is intended to support running on Windows 10
2023 * (server) and later versions. It will not run on earlier
2024 * versions, as they assume that many of the operations which
2025 * Linux needs accomplished with a spinlock held were done via
2026 * asynchronous messaging via VMBus. Windows 10 increases the
2027 * surface area of PCI emulation so that these actions can take
2028 * place by suspending a virtual processor for their duration.
2030 * This function negotiates the channel protocol version,
2031 * failing if the host doesn't support the necessary protocol
2034 static int hv_pci_protocol_negotiation(struct hv_device
*hdev
)
2036 struct pci_version_request
*version_req
;
2037 struct hv_pci_compl comp_pkt
;
2038 struct pci_packet
*pkt
;
2043 * Initiate the handshake with the host and negotiate
2044 * a version that the host can support. We start with the
2045 * highest version number and go down if the host cannot
2048 pkt
= kzalloc(sizeof(*pkt
) + sizeof(*version_req
), GFP_KERNEL
);
2052 init_completion(&comp_pkt
.host_event
);
2053 pkt
->completion_func
= hv_pci_generic_compl
;
2054 pkt
->compl_ctxt
= &comp_pkt
;
2055 version_req
= (struct pci_version_request
*)&pkt
->message
;
2056 version_req
->message_type
.type
= PCI_QUERY_PROTOCOL_VERSION
;
2058 for (i
= 0; i
< ARRAY_SIZE(pci_protocol_versions
); i
++) {
2059 version_req
->protocol_version
= pci_protocol_versions
[i
];
2060 ret
= vmbus_sendpacket(hdev
->channel
, version_req
,
2061 sizeof(struct pci_version_request
),
2062 (unsigned long)pkt
, VM_PKT_DATA_INBAND
,
2063 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
);
2065 dev_err(&hdev
->device
,
2066 "PCI Pass-through VSP failed sending version reqquest: %#x",
2071 wait_for_completion(&comp_pkt
.host_event
);
2073 if (comp_pkt
.completion_status
>= 0) {
2074 pci_protocol_version
= pci_protocol_versions
[i
];
2075 dev_info(&hdev
->device
,
2076 "PCI VMBus probing: Using version %#x\n",
2077 pci_protocol_version
);
2081 if (comp_pkt
.completion_status
!= STATUS_REVISION_MISMATCH
) {
2082 dev_err(&hdev
->device
,
2083 "PCI Pass-through VSP failed version request: %#x",
2084 comp_pkt
.completion_status
);
2089 reinit_completion(&comp_pkt
.host_event
);
2092 dev_err(&hdev
->device
,
2093 "PCI pass-through VSP failed to find supported version");
2102 * hv_pci_free_bridge_windows() - Release memory regions for the
2104 * @hbus: Root PCI bus, as understood by this driver
2106 static void hv_pci_free_bridge_windows(struct hv_pcibus_device
*hbus
)
2109 * Set the resources back to the way they looked when they
2110 * were allocated by setting IORESOURCE_BUSY again.
2113 if (hbus
->low_mmio_space
&& hbus
->low_mmio_res
) {
2114 hbus
->low_mmio_res
->flags
|= IORESOURCE_BUSY
;
2115 vmbus_free_mmio(hbus
->low_mmio_res
->start
,
2116 resource_size(hbus
->low_mmio_res
));
2119 if (hbus
->high_mmio_space
&& hbus
->high_mmio_res
) {
2120 hbus
->high_mmio_res
->flags
|= IORESOURCE_BUSY
;
2121 vmbus_free_mmio(hbus
->high_mmio_res
->start
,
2122 resource_size(hbus
->high_mmio_res
));
/**
 * hv_pci_allocate_bridge_windows() - Allocate memory regions
 * for the bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function calls vmbus_allocate_mmio(), which is itself a
 * bit of a compromise.  Ideally, we might change the pnp layer
 * in the kernel such that it comprehends either PCI devices
 * which are "grandchildren of ACPI," with some intermediate bus
 * node (in this case, VMBus) or change it such that it
 * understands VMBus.  The pnp layer, however, has been declared
 * deprecated, and not subject to change.
 *
 * The workaround, implemented here, is to ask VMBus to allocate
 * MMIO space for this bus.  VMBus itself knows which ranges are
 * appropriate by looking at its own ACPI objects.  Then, after
 * these ranges are claimed, they're modified to look like they
 * would have looked if the ACPI and pnp code had allocated
 * bridge windows.  These descriptors have to exist in this form
 * in order to satisfy the code which will get invoked when the
 * endpoint PCI function driver calls request_mem_region() or
 * request_mem_region_exclusive().
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
{
	resource_size_t align;
	int ret;

	if (hbus->low_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->resources_for_children,
				 hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->resources_for_children,
				 hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}

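/*
 * Note on the "align" computation above: 1ULL << (63 - __builtin_clzll(sz))
 * rounds sz down to the largest power of two that does not exceed it, and
 * that value is passed to vmbus_allocate_mmio() as the required alignment
 * of the bridge window.  Worked example (illustrative size only): for
 * low_mmio_space == 0x300000 (3 MiB), __builtin_clzll() returns 42, so
 * align becomes 1ULL << 21 == 0x200000 (2 MiB).
 */
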
/**
 * hv_allocate_config_window() - Find MMIO space for PCI Config
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function claims memory-mapped I/O space for accessing
 * configuration space for the functions on this bus.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
	 * resource claims (those which cannot be overlapped) and the ranges
	 * which are valid for the children of this bus, which are intended
	 * to be overlapped by those children.  Set the flag on this claim
	 * meaning that this region can't be overlapped.
	 */
	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

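/*
 * The IORESOURCE_BUSY flag set just above is what distinguishes this claim
 * from the bridge windows set up in hv_pci_allocate_bridge_windows(): the
 * config window is an exclusive, device-style claim that children must not
 * overlap, whereas the bridge windows deliberately clear IORESOURCE_BUSY so
 * that the children's BARs can be allocated inside them.
 */
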
static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}

/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;

	/*
	 * Tell the host that the bus is ready to use, and moved into the
	 * powered-on state.  This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration space
	 * access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto exit;

	wait_for_completion(&comp_pkt.host_event);

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}

/**
 * hv_pci_query_relations() - Ask host to send list of child
 * devices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (ret)
		return ret;

	wait_for_completion(&comp);
	return 0;
}

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * The host OS is expecting to be sent a request as a message
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated",
 * which is to say the values which should be used by the
 * hardware when it delivers an interrupt.  (MMIO resources are
 * used in local terms.)  This is nice for Windows, and lines up
 * with the FDO/PDO split, which doesn't exist in Linux.  Linux
 * is deeply expecting to scan an emulated PCI configuration
 * space.  So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	u32 wslot;
	int ret;

	size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)&pkt->message;
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)&pkt->message;
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev, hv_pcidev_ref_by_slot);

		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
				       size_res, (unsigned long)pkt,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (ret)
			break;

		wait_for_completion(&comp_pkt.host_event);

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}
	}

	kfree(pkt);
	return ret;
}

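/*
 * Note on the wslot loop above: the 8-bit loop index enumerates every
 * possible "Windows slot" value.  Assuming the win_slot_encoding layout
 * defined earlier in this file (dev:5, func:3), the device number sits in
 * the low five bits and the function number in the next three, so, as an
 * illustrative example, wslot 0x22 would mean device 2, function 1, while
 * a single-function device at device 0 uses wslot 0.
 */
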
/**
 * hv_send_resources_released() - Report local resources
 * released
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	u32 wslot;
	int ret;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev, hv_pcidev_ref_by_slot);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static void get_hvpcibus(struct hv_pcibus_device *hbus)
{
	atomic_inc(&hbus->remove_lock);
}

static void put_hvpcibus(struct hv_pcibus_device *hbus)
{
	if (atomic_dec_and_test(&hbus->remove_lock))
		complete(&hbus->remove_event);
}

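/*
 * Lifecycle note: hv_pci_probe() takes the initial reference on
 * remove_lock, in-flight work (such as hot add/remove handling) takes
 * further references through get_hvpcibus(), and hv_pci_remove() drops the
 * initial reference and then waits on remove_event, so the page backing
 * hbus is only freed after all outstanding work has dropped its references
 * via put_hvpcibus().
 */
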
/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev:	VMBus's tracking struct for this root PCI bus
 * @dev_id:	Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct hv_pcibus_device *hbus;
	int ret;

	/*
	 * hv_pcibus_device contains the hypercall arguments for retargeting in
	 * hv_irq_unmask(). Those must not cross a page boundary.
	 */
	BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);

	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;
	hbus->state = hv_pcibus_init;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and
	 * other specs.  Pull it from the instance ID, to get something
	 * unique.  Bytes 8 and 9 are what is used in Windows guests, so
	 * do the same thing for consistency.  Note that, since this code
	 * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee
	 * that (1) the only domain in use for something that looks like
	 * a physical PCI bus (which is actually emulated by the
	 * hypervisor) is domain 0 and (2) there will be no overlap
	 * between domains derived from these instance IDs in the same
	 * VM.
	 */
	hbus->sysdata.domain = hdev->dev_instance.b[9] |
			       hdev->dev_instance.b[8] << 8;
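
	/*
	 * Worked example (illustrative bytes only, not taken from a real
	 * instance ID): if the channel's instance GUID has b[8] == 0x12 and
	 * b[9] == 0x34, the derived domain is 0x34 | (0x12 << 8) == 0x1234,
	 * so the pass-through function shows up in the guest under PCI
	 * domain 1234 (for example as 1234:00:00.0 in lspci) rather than
	 * domain 0000.
	 */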

	hbus->hdev = hdev;
	atomic_inc(&hbus->remove_lock);
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	INIT_LIST_HEAD(&hbus->resources_for_children);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
	init_completion(&hbus->remove_event);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->sysdata.domain);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_bus;
	}

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev);
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus);
	if (!hbus->sysdata.fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto free_irq_domain;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->sysdata.fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_bus:
	free_page((unsigned long)hbus);
	return ret;
}

static void hv_pci_bus_exit(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct pci_bus_relations relations;
	struct hv_pci_compl comp_pkt;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (hdev->channel->rescind)
		return;

	/* Delete any children which might still exist. */
	memset(&relations, 0, sizeof(relations));
	hv_pci_devices_present(hbus, &relations);

	ret = hv_send_resources_released(hdev);
	if (ret)
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
			       sizeof(struct pci_message),
			       (unsigned long)&pkt.teardown_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->pci_bus);
		pci_remove_root_bus(hbus->pci_bus);
		pci_unlock_rescan_remove();
		hbus->state = hv_pcibus_removed;
	}

	hv_pci_bus_exit(hdev);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	pci_free_resource_list(&hbus->resources_for_children);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->sysdata.fwnode);
	put_hvpcibus(hbus);
	wait_for_completion(&hbus->remove_event);
	destroy_workqueue(hbus->wq);
	free_page((unsigned long)hbus);
	return 0;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);
}

static int __init init_hv_pci_drv(void)
{
	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");