// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/bitmap.h>
#include <linux/pci.h>

#include <asm/opal.h>

#include "pci.h"

/* for pci_dev_is_added() */
#include "../../../../drivers/pci/pci.h"
/*
 * The majority of the complexity in supporting SR-IOV on PowerNV comes from
 * the need to put the MMIO space for each VF into a separate PE. Internally
 * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
 * The MBT historically only applied to the 64bit MMIO window of the PHB
 * so it's common to see it referred to as the "M64BT".
 *
 * An MBT entry stores the mapped range as an <base>,<mask> pair. This forces
 * the address range that we want to map to be power-of-two sized and aligned.
 * For conventional PCI devices this isn't really an issue since PCI device BARs
 * have the same requirement.
 *
 * For a SR-IOV BAR things are a little more awkward since size and alignment
 * are not coupled. The alignment is set based on the per-VF BAR size, but
 * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs
 * isn't necessarily a power of two, so neither is the total size. To fix that
 * we need to finesse (read: hack) the Linux BAR allocator so that it will
 * allocate the SR-IOV BARs in a way that lets us map them using the MBT.
 *
 * The changes to size and alignment that we need to do depend on the "mode"
 * of MBT entry that we use. We only support SR-IOV on PHB3 (IODA2) and above,
 * so as a baseline we can assume that we have the following BAR modes
 * available:
 *
 *   NB: $PE_COUNT is the number of PEs that the PHB supports.
 *
 * a) A segmented BAR that splits the mapped range into $PE_COUNT equally sized
 *    segments. The n'th segment is mapped to the n'th PE.
 * b) An un-segmented BAR that maps the whole address range to a specific PE.
 *
 * We prefer to use mode a) since it only requires one MBT entry per SR-IOV
 * BAR. For comparison, b) requires one entry per-VF per-BAR, or
 * (num-vfs * num-sriov-bars) in total. To use a) we need the size of each
 * segment to equal the size of the per-VF BAR area. So:
 *
 *	new_size = per-vf-size * number-of-PEs
 *
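 * As a worked example (illustrative numbers, not from any particular
 * device): a 1MB per-VF BAR on a PHB supporting 256 PEs is resized to
 * new_size = 1MB * 256 = 256MB, regardless of how many VFs are enabled.
 *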
 * The alignment for the SR-IOV BAR also needs to be changed from per-vf-size
 * to "new_size", calculated above. Implementing this is a convoluted process
 * which requires several hooks in the PCI core:
 *
 * 1. In pcibios_add_device() we call pnv_pci_ioda_fixup_iov().
 *
 *    At this point the device has been probed and the device's BARs are sized,
 *    but no resource allocations have been done. The SR-IOV BARs are sized
 *    based on the maximum number of VFs supported by the device and we need
 *    to increase that to new_size.
 *
 * 2. Later, when Linux actually assigns resources it tries to make the resource
 *    allocations for each PCI bus as compact as possible. As a part of that it
 *    sorts the BARs on a bus by their required alignment, which is calculated
 *    using pci_resource_alignment().
 *
 *    For IOV resources this goes:
 *    pci_resource_alignment()
 *        pci_sriov_resource_alignment()
 *            pcibios_sriov_resource_alignment()
 *                pnv_pci_iov_resource_alignment()
 *
 *    Our hook overrides the default alignment, equal to the per-vf-size, with
 *    new_size computed above.
 *
 * 3. When userspace enables VFs for a device:
 *
 *    sriov_enable()
 *        pcibios_sriov_enable()
 *            pnv_pcibios_sriov_enable()
 *
 *    This is where we actually allocate PE numbers for each VF and setup the
 *    MBT mapping for each SR-IOV BAR. In steps 1) and 2) we setup an "arena"
 *    where each MBT segment is equal in size to the VF BAR so we can shift
 *    around the actual SR-IOV BAR location within this arena. We need this
 *    ability because the PE space is shared by all devices on the same PHB.
 *    When using mode a) described above, segment 0 maps to PE#0, which might
 *    already be in use by another device on the PHB.
 *
 *    As a result we need to allocate a contiguous range of PE numbers, then
 *    shift the address programmed into the SR-IOV BAR of the PF so that the
 *    address of VF0 matches up with the segment corresponding to the first
 *    allocated PE number. This is handled in pnv_pci_vf_resource_shift().
 *
 *    Once all that is done we return to the PCI core which then enables VFs,
 *    scans them and creates pci_devs for each. The init process for a VF is
 *    largely the same as a normal device, but the VF is inserted into the IODA
 *    PE that we allocated for it rather than the PE associated with the bus.
 *
 * 4. When userspace disables VFs we unwind the above in
 *    pnv_pcibios_sriov_disable(). Fortunately this is relatively simple since
 *    we don't need to validate anything, just tear down the mappings and
 *    move the SR-IOV resource back to its "proper" location.
 *
 * That's how mode a) works. In theory mode b) (single PE mapping) is less work
 * since we can map each individual VF with a separate BAR. However, there are
 * a few things that make it less than ideal:
 *
 * 1) For IODA2 mode b) has a minimum alignment requirement of 32MB. This makes
 *    it only usable for devices with very large per-VF BARs. Such devices are
 *    similar to Big Foot. They definitely exist, but I've never seen one.
 *
 * 2) The number of MBT entries that we have is limited. PHB3 and PHB4 only
 *    have 16 total and some are needed for other things. Most SR-IOV capable
 *    network cards can support more than 16 VFs on each port.
 *
 * We use b) when using a) would use more than 1/4 of the entire 64 bit MMIO
 * window.
 *
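 * As an illustration of the cost (illustrative numbers): expanding a 256MB
 * per-VF BAR for 256 PEs would need a 64GB arena (256 * 256MB) in the 64bit
 * window, so a BAR that large is mapped with per-VF windows (mode b)
 * instead.
 *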
 * PHB4 (IODA3) added a few new features that would be useful for SR-IOV. It
 * allowed the MBT to map 32bit MMIO space in addition to 64bit which allows
 * us to support SR-IOV BARs in the 32bit MMIO window. This is useful since
 * the Linux BAR allocation will place any BAR marked as non-prefetchable into
 * the non-prefetchable bridge window, which is 32bit only. It also added two
 * new modes:
 *
 * c) A segmented BAR similar to a), but each segment can be individually
 *    mapped to any PE. This matches how the 32bit MMIO window worked on
 *    PHB3.
 *
 * d) A segmented BAR with 8, 64, or 128 segments. This works similarly to a),
 *    but with fewer segments and a configurable base PE.
 *
 *    i.e. The n'th segment maps to the (n + base)'th PE.
 *
 *    The base PE is also required to be a multiple of the window size.
 *
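 *    For example: an 8-segment mode d) window with base PE 16 would map
 *    segment 0 to PE#16, segment 1 to PE#17, and so on up to segment 7
 *    mapping to PE#23.
 *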
 * Unfortunately, the OPAL API doesn't currently (as of skiboot v6.6) allow us
 * to exploit any of the IODA3 features.
 */

static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct resource *res;
	int i;
	resource_size_t vf_bar_sz;
	struct pnv_iov_data *iov;
	int mul;

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov)
		goto disable_iov;
	pdev->dev.archdata.iov_data = iov;
	mul = phb->ioda.total_pe_num;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || res->parent)
			continue;
		if (!pnv_pci_is_m64_flags(res->flags)) {
			dev_warn(&pdev->dev, "Don't support SR-IOV with non-M64 VF BAR%d: %pR\n",
				 i, res);
			goto disable_iov;
		}

		vf_bar_sz = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);

		/*
		 * Generally, one segmented M64 BAR maps one IOV BAR. However,
		 * if a VF BAR is too large we end up wasting a lot of space.
		 * If each VF needs more than 1/4 of the default m64 segment
		 * then each VF BAR should be mapped in single-PE mode to reduce
		 * the amount of space required. This does however limit the
		 * number of VFs we can support.
		 *
		 * The 1/4 limit is arbitrary and can be tweaked.
		 */
		if (vf_bar_sz > (phb->ioda.m64_segsize >> 2)) {
			/*
			 * On PHB3, the minimum size alignment of an M64 BAR in
			 * single mode is 32MB. If this VF BAR is smaller than
			 * 32MB, but still too large for a segmented window,
			 * then we can't map it and need to disable SR-IOV for
			 * this device.
			 */
			if (vf_bar_sz < SZ_32M) {
				pci_err(pdev, "VF BAR%d: %pR can't be mapped in single PE mode\n",
					i, res);
				goto disable_iov;
			}

			iov->m64_single_mode[i] = true;
			continue;
		}

		/*
		 * This BAR can be mapped with one segmented window, so adjust
		 * the resource size to accommodate.
		 */
		pci_dbg(pdev, " Fixing VF BAR%d: %pR to\n", i, res);
		res->end = res->start + vf_bar_sz * mul - 1;
		pci_dbg(pdev, "                       %pR\n", res);

		pci_info(pdev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)\n",
			 i, res, mul);

		iov->need_shift = true;
	}

	return;

disable_iov:
	/* Save ourselves some MMIO space by disabling the unusable BARs */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		res->flags = 0;
		res->end = res->start - 1;
	}

	pdev->dev.archdata.iov_data = NULL;
	kfree(iov);
}
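
/*
 * Worked example for the fixup above (illustrative numbers): a device
 * advertising 64 VFs with a 1MB per-VF BAR on a 256 PE PHB has its IOV
 * resource grown from 64MB to 1MB * 256 = 256MB so that each MBT segment
 * covers exactly one VF-sized slice. A per-VF BAR larger than a quarter of
 * phb->ioda.m64_segsize is instead left at its native size and flagged for
 * single PE mode.
 */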

void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
{
	if (WARN_ON(pci_dev_is_added(pdev)))
		return;

	if (pdev->is_virtfn) {
		struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);

		/*
		 * VF PEs are single-device PEs so their pdev pointer needs to
		 * be set. The pdev doesn't exist when the PE is allocated (in
		 * pcibios_sriov_enable()) so we fix it up here.
		 */
		pe->pdev = pdev;
		WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
	} else if (pdev->is_physfn) {
		/*
		 * For PFs adjust their allocated IOV resources to match what
		 * the PHB can support using its M64 BAR table.
		 */
		pnv_pci_ioda_fixup_iov_resources(pdev);
	}
}

resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
					       int resno)
{
	resource_size_t align = pci_iov_resource_size(pdev, resno);
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct pnv_iov_data *iov = pnv_iov_get(pdev);

	/*
	 * iov can be null if we have an SR-IOV device with an IOV BAR that
	 * can't be placed in the m64 space (i.e. the BAR is 32bit or
	 * non-prefetch). In that case we don't allow VFs to be enabled since
	 * one of their BARs would not be placed in the correct PE.
	 */
	if (!iov)
		return align;

	/*
	 * If we're using single mode then we can just use the native VF BAR
	 * alignment. We validated that it's possible to use a single PE
	 * window above when we did the fixup.
	 */
	if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES])
		return align;

	/*
	 * On the PowerNV platform the IOV BAR is mapped by an M64 BAR to
	 * enable SR-IOV, and from the hardware's perspective the range
	 * mapped by an M64 BAR must be size aligned.
	 *
	 * This function returns the total IOV BAR size if the M64 BAR is in
	 * Shared PE mode, or just the VF BAR size if not.
	 * If the M64 BAR is in Single PE mode, return the VF BAR size or
	 * the M64 segment size if the IOV BAR size is less.
	 */
	return phb->ioda.total_pe_num * align;
}
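
/*
 * Worked example for pnv_pci_iov_resource_alignment() above (illustrative
 * numbers): a 1MB per-VF BAR in segmented mode on a 256 PE PHB reports an
 * alignment of 256 * 1MB = 256MB rather than the native 1MB, which is what
 * forces the allocator to place the IOV BAR on an MBT-mappable boundary.
 */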

static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_iov_data *iov;
	struct pnv_phb *phb;
	int window_id;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);

	for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) {
		opal_pci_phb_mmio_enable(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 window_id,
					 0);

		clear_bit(window_id, &phb->ioda.m64_bar_alloc);
	}

	return 0;
}

/*
 * PHB3 and beyond support segmented windows. The window's address range
 * is subdivided into phb->ioda.total_pe_num segments and there's a 1-1
 * mapping between PEs and segments.
 */
static int64_t pnv_ioda_map_m64_segmented(struct pnv_phb *phb,
					  int window_id,
					  resource_size_t start,
					  resource_size_t size)
{
	int64_t rc;

	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 window_id,
					 start,
					 0, /* unused */
					 size);
	if (rc)
		goto out;

	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      window_id,
				      OPAL_ENABLE_M64_SPLIT);
out:
	if (rc)
		pr_err("Failed to map M64 window #%d: %lld\n", window_id, rc);

	return rc;
}

static int64_t pnv_ioda_map_m64_single(struct pnv_phb *phb,
				       int window_id,
				       int pe_num,
				       resource_size_t start,
				       resource_size_t size)
{
	int64_t rc;

	/*
	 * The API for setting up m64 mmio windows seems to have been designed
	 * with P7-IOC in mind. For that chip each M64 BAR (window) had a fixed
	 * split of 8 equally sized segments, each of which could be
	 * individually assigned to a PE.
	 *
	 * The problem with this is that the API doesn't have any way to
	 * communicate the number of segments we want on a BAR. This wasn't
	 * a problem for p7-ioc since you didn't have a choice, but the
	 * single PE windows added in PHB3 don't map cleanly to this API.
	 *
	 * As a result we've got this slightly awkward process where we
	 * call opal_pci_map_pe_mmio_window() to put the window into single
	 * PE mode, and set the PE for the window before setting the address
	 * bounds. We need to do it this way because the single PE windows
	 * on PHB3 have different alignment requirements.
	 */
	rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					 pe_num,
					 OPAL_M64_WINDOW_TYPE,
					 window_id,
					 0);
	if (rc)
		goto out;

	/*
	 * NB: In single PE mode the window needs to be aligned to 32MB
	 */
	rc = opal_pci_set_phb_mem_window(phb->opal_id,
					 OPAL_M64_WINDOW_TYPE,
					 window_id,
					 start,
					 0, /* ignored by FW, m64 is 1-1 */
					 size);
	if (rc)
		goto out;

	/*
	 * Now actually enable it. We specified the BAR should be in "non-split"
	 * mode so FW will validate that the BAR is in single PE mode.
	 */
	rc = opal_pci_phb_mmio_enable(phb->opal_id,
				      OPAL_M64_WINDOW_TYPE,
				      window_id,
				      OPAL_ENABLE_M64_NON_SPLIT);
out:
	if (rc)
		pr_err("Error mapping single PE BAR\n");

	return rc;
}

static int pnv_pci_alloc_m64_bar(struct pnv_phb *phb, struct pnv_iov_data *iov)
{
	int win;

	do {
		win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
					 phb->ioda.m64_bar_idx + 1, 0);

		if (win >= phb->ioda.m64_bar_idx + 1)
			return -1;
	} while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));

	set_bit(win, iov->used_m64_bar_mask);

	return win;
}
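
/*
 * Usage sketch for the window allocator above (hypothetical caller, for
 * illustration only; the real callers are below):
 *
 *	win = pnv_pci_alloc_m64_bar(phb, iov);
 *	if (win < 0)
 *		return -ENOSPC; (every window is already claimed)
 *	rc = pnv_ioda_map_m64_segmented(phb, win, start, size);
 *
 * The test_and_set_bit() loop makes the search safe against a concurrent
 * allocator claiming the same window between the find and the set.
 */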

static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_iov_data *iov;
	struct pnv_phb *phb;
	int win;
	struct resource *res;
	int i, j;
	int64_t rc;
	resource_size_t size, start;
	int base_pe_num;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &pdev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;

		/* don't need single mode? map everything in one go! */
		if (!iov->m64_single_mode[i]) {
			win = pnv_pci_alloc_m64_bar(phb, iov);
			if (win < 0)
				goto m64_failed;

			size = resource_size(res);
			start = res->start;

			rc = pnv_ioda_map_m64_segmented(phb, win, start, size);
			if (rc)
				goto m64_failed;

			continue;
		}

		/* otherwise map each VF with single PE BARs */
		size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i);
		base_pe_num = iov->vf_pe_arr[0].pe_number;

		for (j = 0; j < num_vfs; j++) {
			win = pnv_pci_alloc_m64_bar(phb, iov);
			if (win < 0)
				goto m64_failed;

			start = res->start + size * j;
			rc = pnv_ioda_map_m64_single(phb, win,
						     base_pe_num + j,
						     start,
						     size);
			if (rc)
				goto m64_failed;
		}
	}
	return 0;

m64_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
	return -EBUSY;
}
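
/*
 * Illustrative single PE mode layout for the mapping loop above: with
 * num_vfs = 4 and a 32MB per-VF BAR, four M64 windows are claimed and
 * mapped 1:1 at res->start + 0MB, 32MB, 64MB and 96MB to PEs
 * base_pe_num + 0 through base_pe_num + 3.
 */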

static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
{
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe, *pe_n;

	phb = pci_bus_to_pnvhb(pdev->bus);

	if (!pdev->is_physfn)
		return;

	/* FIXME: Use pnv_ioda_release_pe()? */
	list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
		if (pe->parent_dev != pdev)
			continue;

		pnv_pci_ioda2_release_pe_dma(pe);

		/* Remove from list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_del(&pe->list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		pnv_ioda_deconfigure_pe(phb, pe);

		pnv_ioda_free_pe(pe);
	}
}

static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
{
	struct resource *res, res2;
	struct pnv_iov_data *iov;
	resource_size_t size;
	u16 num_vfs;
	int i;

	if (!dev->is_physfn)
		return -EINVAL;
	iov = pnv_iov_get(dev);

	/*
	 * "offset" is in VFs. The M64 windows are sized so that when they
	 * are segmented, each segment is the same size as the IOV BAR.
	 * Each segment is in a separate PE, and the high order bits of the
	 * address are the PE number. Therefore, each VF's BAR is in a
	 * separate PE, and changing the IOV BAR start address changes the
	 * range of PEs the VFs are in.
	 */
	num_vfs = iov->num_vfs;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;
		if (iov->m64_single_mode[i])
			continue;

		/*
		 * The actual IOV BAR range is determined by the start address
		 * and the size required for num_vfs VF BARs. This check is to
		 * make sure that after shifting, the range will not overlap
		 * with another device.
		 */
		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2.flags = res->flags;
		res2.start = res->start + (size * offset);
		res2.end = res2.start + (size * num_vfs) - 1;

		if (res2.end > res->end) {
			dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
				i, &res2, res, num_vfs, offset);
			return -EBUSY;
		}
	}

	/*
	 * Since the M64 BAR shares segments among all possible 256 PEs,
	 * we have to shift the beginning of the PF IOV BAR to make it start
	 * from the segment which belongs to the PE number assigned to the
	 * first VF. This creates a "hole" in /proc/iomem which could be used
	 * for allocating other resources, so we reserve this area below and
	 * release it when IOV is released.
	 */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = &dev->resource[i + PCI_IOV_RESOURCES];
		if (!res->flags || !res->parent)
			continue;
		if (iov->m64_single_mode[i])
			continue;

		size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
		res2 = *res;
		res->start += size * offset;

		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
			 i, &res2, res, (offset > 0) ? "En" : "Dis",
			 num_vfs, offset);

		if (offset < 0) {
			devm_release_resource(&dev->dev, &iov->holes[i]);
			memset(&iov->holes[i], 0, sizeof(iov->holes[i]));
		}

		pci_update_resource(dev, i + PCI_IOV_RESOURCES);

		if (offset > 0) {
			iov->holes[i].start = res2.start;
			iov->holes[i].end = res2.start + size * offset - 1;
			iov->holes[i].flags = IORESOURCE_BUS;
			iov->holes[i].name = "pnv_iov_reserved";
			devm_request_resource(&dev->dev, res->parent,
					      &iov->holes[i]);
		}
	}
	return 0;
}
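
/*
 * Hole bookkeeping example for the shift above (illustrative numbers):
 * shifting a BAR with a 1MB per-VF size by offset = 64 moves res->start up
 * by 64MB and reserves the vacated 64MB as "pnv_iov_reserved" so nothing
 * else gets allocated there; the matching negative shift on disable
 * releases the hole again.
 */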

static void pnv_pci_sriov_disable(struct pci_dev *pdev)
{
	u16 num_vfs, base_pe;
	struct pnv_iov_data *iov;

	iov = pnv_iov_get(pdev);
	if (WARN_ON(!iov))
		return;

	num_vfs = iov->num_vfs;
	base_pe = iov->vf_pe_arr[0].pe_number;

	/* Release VF PEs */
	pnv_ioda_release_vf_PE(pdev);

	/* Un-shift the IOV BARs if we need to */
	if (iov->need_shift)
		pnv_pci_vf_resource_shift(pdev, -base_pe);

	/* Release M64 windows */
	pnv_pci_vf_release_m64(pdev, num_vfs);
}

static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;
	int pe_num;
	u16 vf_index;
	struct pnv_iov_data *iov;
	struct pci_dn *pdn;

	if (!pdev->is_physfn)
		return;

	phb = pci_bus_to_pnvhb(pdev->bus);
	pdn = pci_get_pdn(pdev);
	iov = pnv_iov_get(pdev);

	/* Reserve PE for each VF */
	for (vf_index = 0; vf_index < num_vfs; vf_index++) {
		int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
		int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
		struct pci_dn *vf_pdn;

		pe = &iov->vf_pe_arr[vf_index];
		pe->phb = phb;
		pe->flags = PNV_IODA_PE_VF;
		pe->pbus = NULL;
		pe->parent_dev = pdev;
		pe->mve_number = -1;
		pe->rid = (vf_bus << 8) | vf_devfn;

		pe_num = pe->pe_number;
		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
			pci_domain_nr(pdev->bus), pdev->bus->number,
			PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);

		if (pnv_ioda_configure_pe(phb, pe)) {
			/* XXX What do we do here ? */
			pnv_ioda_free_pe(pe);
			pe->pdev = NULL;
			continue;
		}

		/* Put PE to the list */
		mutex_lock(&phb->ioda.pe_list_mutex);
		list_add_tail(&pe->list, &phb->ioda.pe_list);
		mutex_unlock(&phb->ioda.pe_list_mutex);

		/* associate this pe to its pdn */
		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
			if (vf_pdn->busno == vf_bus &&
			    vf_pdn->devfn == vf_devfn) {
				vf_pdn->pe_number = pe_num;
				break;
			}
		}

		pnv_pci_ioda2_setup_dma_pe(phb, pe);
	}
}
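
/*
 * RID example for the loop above: a VF at bus 0x02, devfn 0x11 is given
 * pe->rid = (0x02 << 8) | 0x11 = 0x0211, the requester ID the PHB uses to
 * associate the VF's traffic with its PE.
 */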

static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	struct pnv_ioda_pe *base_pe;
	struct pnv_iov_data *iov;
	struct pnv_phb *phb;
	int ret;
	u16 i;

	phb = pci_bus_to_pnvhb(pdev->bus);
	iov = pnv_iov_get(pdev);

	/*
	 * There are calls to the IODA2 PE setup code littered throughout.
	 * We could probably fix that, but we'd still have problems due to
	 * the restrictions inherent to IODA1 PHBs.
	 *
	 * NB: We class IODA3 as IODA2 since they're very similar.
	 */
	if (phb->type != PNV_PHB_IODA2) {
		pci_err(pdev, "SR-IOV is not supported on this PHB\n");
		return -ENXIO;
	}

	if (!iov) {
		dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n");
		return -ENOSPC;
	}

	/* allocate a contiguous block of PEs for our VFs */
	base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
	if (!base_pe) {
		pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
		return -EBUSY;
	}

	iov->vf_pe_arr = base_pe;
	iov->num_vfs = num_vfs;

	/* Assign M64 window accordingly */
	ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
	if (ret) {
		dev_info(&pdev->dev, "Not enough M64 window resources\n");
		goto m64_failed;
	}

	/*
	 * When using one M64 BAR to map one IOV BAR, we need to shift
	 * the IOV BAR according to the PE# allocated to the VFs.
	 * Otherwise, the PE# for the VF will conflict with others.
	 */
	if (iov->need_shift) {
		ret = pnv_pci_vf_resource_shift(pdev, base_pe->pe_number);
		if (ret)
			goto shift_failed;
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

shift_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);

m64_failed:
	for (i = 0; i < num_vfs; i++)
		pnv_ioda_free_pe(&iov->vf_pe_arr[i]);

	return ret;
}

int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_sriov_vf_pdns(pdev);
	return 0;
}

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_sriov_vf_pdns(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}