/*
 * pcie.c
 *
 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pcie.h"
#include "hw/pci/msix.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pcie_regs.h"
#include "hw/pci/pcie_port.h"
#include "qemu/range.h"
#include "trace.h"

#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif

#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)

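/*
 * Helper: a slot counts as "powered off" only when both the Power Controller
 * Control field requests power off and the Power Indicator Control field is
 * set to off in the Slot Control value passed in.
 */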
static bool pcie_sltctl_powered_off(uint16_t sltctl)
{
    return (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_OFF
        && (sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_OFF;
}

static const char *pcie_led_state_to_str(uint16_t value)
{
    switch (value) {
    case PCI_EXP_SLTCTL_PWR_IND_ON:
    case PCI_EXP_SLTCTL_ATTN_IND_ON:
        return "on";
    case PCI_EXP_SLTCTL_PWR_IND_BLINK:
    case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
        return "blink";
    case PCI_EXP_SLTCTL_PWR_IND_OFF:
    case PCI_EXP_SLTCTL_ATTN_IND_OFF:
        return "off";
    default:
        return "invalid";
    }
}

/***************************************************************************
 * pci express capability helper functions
 */

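/*
 * Fill the version-independent part of the PCI Express capability: the
 * capability flags (device/port type and capability version), DEVCAP with
 * role-based error reporting, and LNKCAP/LNKSTA defaults of an x1 link at
 * 2.5 GT/s.  Slot-specific link width/speed overrides are applied later by
 * pcie_cap_fill_slot_lnk().
 */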
static void
pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t *cmask = dev->cmask + dev->exp.exp_cap;

    /* capability register
       interrupt message number defaults to 0 */
    pci_set_word(exp_cap + PCI_EXP_FLAGS,
                 ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
                 version);

    /* device capability register
     * role-based error reporting bit must be set by all
     * Functions conforming to the ECN, PCI Express Base
     * Specification, Revision 1.1., or subsequent PCI Express Base
     * Specification revisions.
     */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);

    pci_set_long(exp_cap + PCI_EXP_LNKCAP,
                 (port << PCI_EXP_LNKCAP_PN_SHIFT) |
                 PCI_EXP_LNKCAP_ASPMS_0S |
                 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT));

    pci_set_word(exp_cap + PCI_EXP_LNKSTA,
                 QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT));

    /* We changed link status bits over time, and changing them across
     * migrations is generally fine as hardware changes them too.
     * Let's not bother checking.
     */
    pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
}

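/*
 * For root/downstream ports that are PCIESlot instances, replace the default
 * x1 / 2.5 GT/s link fields programmed by pcie_cap_v1_fill() with the
 * user-configured width and speed, and set the related LNKCAP/LNKCAP2/LNKCTL2
 * bits those values require.
 */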
static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
{
    PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT);
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    /* Skip anything that isn't a PCIESlot */
    if (!s) {
        return;
    }

    /* Clear and fill LNKCAP from what was configured above */
    pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP,
                                 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                               QEMU_PCI_EXP_LNKCAP_MLW(s->width) |
                               QEMU_PCI_EXP_LNKCAP_MLS(s->speed));

    /*
     * Link bandwidth notification is required for all root ports and
     * downstream ports supporting links wider than x1 or multiple link
     * speeds.
     */
    if (s->width > QEMU_PCI_EXP_LNK_X1 ||
        s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_LBNC);
    }

    if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        /*
         * Hot-plug capable downstream ports and downstream ports supporting
         * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
         * to 1b.  PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
         * we also hardwire to 1b here.  2.5GT/s hot-plug slots should also
         * technically implement this, but it's not done here for compatibility.
         */
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_DLLLARC);
        /* the PCI_EXP_LNKSTA_DLLLA will be set in the hotplug function */

        /*
         * Target Link Speed defaults to the highest link speed supported by
         * the component.  2.5GT/s devices are permitted to hardwire to zero.
         */
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2,
                                     PCI_EXP_LNKCTL2_TLS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2,
                                   QEMU_PCI_EXP_LNKCAP_MLS(s->speed) &
                                   PCI_EXP_LNKCTL2_TLS);
    }

    /*
     * 2.5 & 5.0GT/s can be fully described by LNKCAP, but 8.0GT/s is
     * actually a reference to the highest bit supported in this register.
     * We assume the device supports all link speeds.
     */
    if (s->speed > QEMU_PCI_EXP_LNK_5GT) {
        pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U);
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                   PCI_EXP_LNKCAP2_SLS_2_5GB |
                                   PCI_EXP_LNKCAP2_SLS_5_0GB |
                                   PCI_EXP_LNKCAP2_SLS_8_0GB);
        if (s->speed > QEMU_PCI_EXP_LNK_8GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_16_0GB);
        }
        if (s->speed > QEMU_PCI_EXP_LNK_16GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_32_0GB);
        }
        if (s->speed > QEMU_PCI_EXP_LNK_32GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_64_0GB);
        }
    }
}

int pcie_cap_init(PCIDevice *dev, uint8_t offset,
                  uint8_t type, uint8_t port,
                  Error **errp)
{
    /* PCIe cap v2 init */
    int pos;
    uint8_t *exp_cap;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                             PCI_EXP_VER2_SIZEOF, errp);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;
    exp_cap = dev->config + pos;

    /* Filling values common with v1 */
    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2);

    /* Fill link speed and width options */
    pcie_cap_fill_slot_lnk(dev);

    /* Filling v2 specific values */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
                 PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);

    pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);

    if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) {
        /* read-only to behave like a 'NULL' Extended Capability Header */
        pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    }

    return pos;
}

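/*
 * Variant for devices exposing a version 1 PCI Express capability.  Unlike
 * pcie_cap_init() it takes no errp parameter and reports capability
 * allocation failures itself via error_report_err().
 */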
int pcie_cap_v1_init(PCIDevice *dev, uint8_t offset, uint8_t type,
                     uint8_t port)
{
    /* PCIe cap v1 init */
    int pos;
    Error *local_err = NULL;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                             PCI_EXP_VER1_SIZEOF, &local_err);
    if (pos < 0) {
        error_report_err(local_err);
        return pos;
    }

    dev->exp.exp_cap = pos;

    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER1);

    return pos;
}

static int
pcie_endpoint_cap_common_init(PCIDevice *dev, uint8_t offset, uint8_t cap_size)
{
    uint8_t type = PCI_EXP_TYPE_ENDPOINT;
    Error *local_err = NULL;
    int ret;

    /*
     * Windows guests will report Code 10, device cannot start, if
     * a regular Endpoint type is exposed on a root complex.  These
     * should instead be Root Complex Integrated Endpoints.
     */
    if (pci_bus_is_express(pci_get_bus(dev))
        && pci_bus_is_root(pci_get_bus(dev))) {
        type = PCI_EXP_TYPE_RC_END;
    }

    if (cap_size == PCI_EXP_VER1_SIZEOF) {
        return pcie_cap_v1_init(dev, offset, type, 0);
    } else {
        ret = pcie_cap_init(dev, offset, type, 0, &local_err);

        if (ret < 0) {
            error_report_err(local_err);
        }

        return ret;
    }
}

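/*
 * Convenience wrappers for endpoints: pick the v2- or v1-sized capability and
 * let pcie_endpoint_cap_common_init() choose between a regular Endpoint and a
 * Root Complex Integrated Endpoint depending on the bus the device sits on.
 */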
int pcie_endpoint_cap_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER2_SIZEOF);
}

int pcie_endpoint_cap_v1_init(PCIDevice *dev, uint8_t offset)
{
    return pcie_endpoint_cap_common_init(dev, offset, PCI_EXP_VER1_SIZEOF);
}

void pcie_cap_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER2_SIZEOF);
}

void pcie_cap_v1_exit(PCIDevice *dev)
{
    pci_del_capability(dev, PCI_CAP_ID_EXP, PCI_EXP_VER1_SIZEOF);
}

uint8_t pcie_cap_get_type(const PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;

    return (pci_get_word(dev->config + pos + PCI_EXP_FLAGS) &
            PCI_EXP_FLAGS_TYPE) >> PCI_EXP_FLAGS_TYPE_SHIFT;
}

uint8_t pcie_cap_get_version(const PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;

    return pci_get_word(dev->config + pos + PCI_EXP_FLAGS) & PCI_EXP_FLAGS_VERS;
}

/* pci express interrupt message number */
/* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number */
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_FLAGS, PCI_EXP_FLAGS_IRQ);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_FLAGS,
                               vector << PCI_EXP_FLAGS_IRQ_SHIFT);
}

uint8_t pcie_cap_flags_get_vector(PCIDevice *dev)
{
    return (pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS) &
            PCI_EXP_FLAGS_IRQ) >> PCI_EXP_FLAGS_IRQ_SHIFT;
}

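/*
 * Device error reporting: advertise role-based error reporting, make the
 * error reporting enable bits in Device Control guest-writable and make the
 * corresponding Device Status bits write-1-to-clear.
 */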
void pcie_cap_deverr_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_RBER);
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL,
                               PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                               PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
    pci_long_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_DEVSTA,
                               PCI_EXP_DEVSTA_CED | PCI_EXP_DEVSTA_NFED |
                               PCI_EXP_DEVSTA_FED | PCI_EXP_DEVSTA_URD);
}

void pcie_cap_deverr_reset(PCIDevice *dev)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    pci_long_test_and_clear_mask(devctl,
                                 PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
                                 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
}

void pcie_cap_lnkctl_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_LNKCTL,
                               PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
}

void pcie_cap_lnkctl_reset(PCIDevice *dev)
{
    uint8_t *lnkctl = dev->config + dev->exp.exp_cap + PCI_EXP_LNKCTL;
    pci_long_test_and_clear_mask(lnkctl,
                                 PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ES);
}

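/*
 * Recompute exp.hpev_notified: true when the Hot-Plug Interrupt Enable bit is
 * set and at least one supported hot-plug event is both enabled in Slot
 * Control and pending in Slot Status.
 */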
static void hotplug_event_update_event_status(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    dev->exp.hpev_notified = (sltctl & PCI_EXP_SLTCTL_HPIE) &&
        (sltsta & sltctl & PCI_EXP_HP_EV_SUPPORTED);
}

static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /* Note: the logic above does not take into account whether interrupts
     * are masked. The result is that the interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation is
     * subsequently enabled. */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (pci_intx(dev) != -1) {
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}

static void hotplug_event_clear(PCIDevice *dev)
{
    hotplug_event_update_event_status(dev);
    if (!msix_enabled(dev) && !msi_enabled(dev) && pci_intx(dev) != -1 &&
        !dev->exp.hpev_notified) {
        pci_irq_deassert(dev);
    }
}

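/*
 * If the slot advertises a Power Controller, clearing the Power Controller
 * Control bit corresponds to "power on" for the slot.
 */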
void pcie_cap_slot_enable_power(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);

    if (sltcap & PCI_EXP_SLTCAP_PCP) {
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
    }
}

static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    bool *power = opaque;

    pci_set_power(dev, *power);
}

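/*
 * Propagate the slot power state to every device on the secondary bus: with a
 * Power Controller present, power follows the Power Controller Control bit;
 * otherwise the slot is treated as always powered.
 */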
static void pcie_cap_update_power(PCIDevice *hotplug_dev)
{
    uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(hotplug_dev));
    uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP);
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    bool power = true;

    if (sltcap & PCI_EXP_SLTCAP_PCP) {
        power = (sltctl & PCI_EXP_SLTCTL_PCC) == PCI_EXP_SLTCTL_PWR_ON;
        /* Don't we need to check also (sltctl & PCI_EXP_SLTCTL_PIC) ? */
    }

    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                        pcie_set_power_device, &power);
}

/*
 * A PCI Express Hot-Plug Event has occurred, so update slot status register
 * and notify OS of the event if necessary.
 *
 * 6.7.3 PCI Express Hot-Plug Events
 * 6.7.3.4 Software Notification of Hot-Plug Events
 */
static void pcie_cap_slot_event(PCIDevice *dev, PCIExpressHotPlugEvent event)
{
    /* Minor optimization: if nothing changed - no event is needed. */
    if (pci_word_test_and_set_mask(dev->config + dev->exp.exp_cap +
                                   PCI_EXP_SLTSTA, event) == event) {
        return;
    }
    hotplug_event_notify(dev);
}

static void pcie_cap_slot_plug_common(PCIDevice *hotplug_dev, DeviceState *dev,
                                      Error **errp)
{
    uint8_t *exp_cap = hotplug_dev->config + hotplug_dev->exp.exp_cap;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    PCIE_DEV_PRINTF(PCI_DEVICE(dev), "hotplug state: 0x%x\n", sltsta);
    if (sltsta & PCI_EXP_SLTSTA_EIS) {
        /* the slot is electromechanically locked.
         * This error is propagated up to qdev and then to HMP/QMP.
         */
        error_setg_errno(errp, EBUSY, "slot is electromechanically locked");
    }
}

void pcie_cap_slot_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
    uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
    uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);

    /* Check if hot-plug is disabled on the slot */
    if (dev->hotplugged && (sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
        error_setg(errp, "Hot-plug failed: unsupported by the port device '%s'",
                   DEVICE(hotplug_pdev)->id);
        return;
    }

    pcie_cap_slot_plug_common(PCI_DEVICE(hotplug_dev), dev, errp);
}

void pcie_cap_slot_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
                           Error **errp)
{
    PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
    uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);

    if (pci_is_vf(pci_dev)) {
        /* Virtual function cannot be physically disconnected */
        return;
    }

    /* Don't send event when device is enabled during qemu machine creation:
     * it is present on boot, no hotplug event is necessary. We do send an
     * event when the device is disabled later. */
    if (!dev->hotplugged) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
            (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                                       PCI_EXP_LNKSTA_DLLLA);
        }
        pcie_cap_update_power(hotplug_pdev);
        return;
    }

    /* To enable multifunction hot-plug, we just ensure that function 0
     * is added last. When function 0 is added, we set the sltsta and
     * inform OS via event notification.
     */
    if (pci_get_function_0(pci_dev)) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        if (pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
            (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                                       PCI_EXP_LNKSTA_DLLLA);
        }
        pcie_cap_slot_event(hotplug_pdev,
                            PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP);
        pcie_cap_update_power(hotplug_pdev);
    }
}

void pcie_cap_slot_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
                             Error **errp)
{
    qdev_unrealize(dev);
}

static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(dev));

    if (dev->partially_hotplugged) {
        dev->qdev.pending_deleted_event = false;
        return;
    }
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(dev), &error_abort);
    object_unparent(OBJECT(dev));
}

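/*
 * Detach all devices behind the slot, then update Slot/Link Status: clear
 * Presence Detect State (and Data Link Layer Link Active where it is
 * reported) and record a Presence Detect Changed event.
 */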
static void pcie_cap_slot_do_unplug(PCIDevice *dev)
{
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint32_t lnkcap = pci_get_long(exp_cap + PCI_EXP_LNKCAP);

    pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL);

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_PDS);
    if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA ||
        (lnkcap & PCI_EXP_LNKCAP_DLLLARC)) {
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                     PCI_EXP_LNKSTA_DLLLA);
    }
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                               PCI_EXP_SLTSTA_PDC);
}

void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    Error *local_err = NULL;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    PCIDevice *hotplug_pdev = PCI_DEVICE(hotplug_dev);
    uint8_t *exp_cap = hotplug_pdev->config + hotplug_pdev->exp.exp_cap;
    uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP);
    uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);

    /* Check if hot-unplug is disabled on the slot */
    if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) {
        error_setg(errp, "Hot-unplug failed: "
                   "unsupported by the port device '%s'",
                   DEVICE(hotplug_pdev)->id);
        return;
    }

    pcie_cap_slot_plug_common(hotplug_pdev, dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if ((sltctl & PCI_EXP_SLTCTL_PIC) == PCI_EXP_SLTCTL_PWR_IND_BLINK) {
        error_setg(errp, "Hot-unplug failed: "
                   "guest is busy (power indicator blinking)");
        return;
    }

    dev->pending_deleted_event = true;
    dev->pending_deleted_expires_ms =
        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 5000; /* 5 secs */

    /* In case the user cancels a multi-function hot-add operation,
     * remove the function that is not yet exposed to the guest
     * individually, without interacting with the guest.
     */
    if (pci_dev->devfn &&
        !bus->devices[0]) {
        pcie_unplug_device(bus, pci_dev, NULL);

        return;
    }

    if (pcie_sltctl_powered_off(sltctl)) {
        /* slot is powered off -> unplug without round-trip to the guest */
        pcie_cap_slot_do_unplug(hotplug_pdev);
        hotplug_event_notify(hotplug_pdev);
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_ABP);
        return;
    }

    pcie_cap_slot_push_attention_button(hotplug_pdev);
}

/* pci express slot for pci express root/downstream port
   PCI express capability slot registers */
void pcie_cap_slot_init(PCIDevice *dev, PCIESlot *s)
{
    uint32_t pos = dev->exp.exp_cap;

    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
                               PCI_EXP_FLAGS_SLOT);

    pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                 ~PCI_EXP_SLTCAP_PSN);
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                               (s->slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
                               PCI_EXP_SLTCAP_EIP |
                               PCI_EXP_SLTCAP_PIP |
                               PCI_EXP_SLTCAP_AIP |
                               PCI_EXP_SLTCAP_ABP);

    /*
     * Expose native hot-plug on all bridges if hot-plug is enabled on the slot.
     * (unless broken 6.1 ABI is enforced for compat reasons)
     */
    if (s->hotplug &&
        (!s->hide_native_hotplug_cap || DEVICE(dev)->hotplugged)) {
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_HPS |
                                   PCI_EXP_SLTCAP_HPC);
    }

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_PCP);
        pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
        pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                                   PCI_EXP_SLTCTL_PCC);
    }

    pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC);
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PWR_IND_OFF |
                               PCI_EXP_SLTCTL_ATTN_IND_OFF);
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC |
                               PCI_EXP_SLTCTL_AIC |
                               PCI_EXP_SLTCTL_HPIE |
                               PCI_EXP_SLTCTL_CCIE |
                               PCI_EXP_SLTCTL_PDCE |
                               PCI_EXP_SLTCTL_ABPE);
    /* Although reading PCI_EXP_SLTCTL_EIC always returns 0,
     * make the bit writable here in order to detect when 1b is written.
     * pcie_cap_slot_write_config() test-and-clears the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_EIC);

    pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
                               PCI_EXP_HP_EV_SUPPORTED);

    /* Avoid migration abort when this device is hot-removed by the guest */
    pci_word_test_and_clear_mask(dev->cmask + pos + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_PDS);

    dev->exp.hpev_notified = false;

    qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
                             OBJECT(dev));
}

void pcie_cap_slot_reset(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t port_type = pcie_cap_get_type(dev);

    assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
           port_type == PCI_EXP_TYPE_ROOT_PORT);

    PCIE_DEV_PRINTF(dev, "reset\n");

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_EIC |
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC |
                                 PCI_EXP_SLTCTL_HPIE |
                                 PCI_EXP_SLTCTL_CCIE |
                                 PCI_EXP_SLTCTL_PDCE |
                                 PCI_EXP_SLTCTL_ABPE);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PWR_IND_OFF |
                               PCI_EXP_SLTCTL_ATTN_IND_OFF);

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        /* Downstream ports enforce device number 0. */
        bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
        uint16_t pic;

        if (populated) {
            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                         PCI_EXP_SLTCTL_PCC);
        } else {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                                       PCI_EXP_SLTCTL_PCC);
        }

        pic = populated ?
              PCI_EXP_SLTCTL_PWR_IND_ON : PCI_EXP_SLTCTL_PWR_IND_OFF;
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
    }

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_EIS |/* on reset,
                                                        the lock is released */
                                 PCI_EXP_SLTSTA_CC |
                                 PCI_EXP_SLTSTA_PDC |
                                 PCI_EXP_SLTSTA_ABP);

    pcie_cap_update_power(dev);
    hotplug_event_update_event_status(dev);
}

void pcie_cap_slot_get(PCIDevice *dev, uint16_t *slt_ctl, uint16_t *slt_sta)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    *slt_ctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL);
    *slt_sta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);
}

static void find_child_fn(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    PCIDevice **child = opaque;

    if (!*child) {
        *child = dev;
    }
}

/*
 * Returns the plugged device or first function of multifunction plugged device
 */
static PCIDevice *pcie_cap_slot_find_child(PCIDevice *dev)
{
    PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
    PCIDevice *child = NULL;

    pci_for_each_device(sec_bus, pci_bus_num(sec_bus), find_child_fn, &child);

    return child;
}

void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint16_t old_slt_ctl, uint16_t old_slt_sta,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        /*
         * Guests tend to clear all bits during init.
         * If they clear bits that weren't set, this is racy and will lose
         * events: not a big problem for manual button presses, but a problem
         * for us.  As a work-around, detect this and revert status to what it
         * was before the write.
         *
         * Note: in theory this can be detected as a duplicate button press
         * which cancels the previous press. Does not seem to happen in
         * practice as guests seem to only have this bug during init.
         */
#define PCIE_SLOT_EVENTS (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | \
                          PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | \
                          PCI_EXP_SLTSTA_CC)

        if (val & ~old_slt_sta & PCIE_SLOT_EVENTS) {
            sltsta = (sltsta & ~PCIE_SLOT_EVENTS) | (old_slt_sta & PCIE_SLOT_EVENTS);
            pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        }
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    if (trace_event_get_state_backends(TRACE_PCIE_CAP_SLOT_WRITE_CONFIG)) {
        DeviceState *parent = DEVICE(dev);
        DeviceState *child = DEVICE(pcie_cap_slot_find_child(dev));

        trace_pcie_cap_slot_write_config(
            parent->canonical_path,
            child ? child->canonical_path : "no-child",
            (sltsta & PCI_EXP_SLTSTA_PDS) ? "present" : "not present",
            pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_PIC),
            pcie_led_state_to_str(val & PCI_EXP_SLTCTL_PIC),
            pcie_led_state_to_str(old_slt_ctl & PCI_EXP_SLTCTL_AIC),
            pcie_led_state_to_str(val & PCI_EXP_SLTCTL_AIC),
            (old_slt_ctl & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on",
            (val & PCI_EXP_SLTCTL_PWR_OFF) ? "off" : "on");
    }

    /*
     * If the slot is populated, power indicator is off and power
     * controller is off, it is safe to detach the devices.
     *
     * Note: don't detach if condition was already true:
     * this is a work around for guests that overwrite
     * control of powered off slots before powering them on.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && pcie_sltctl_powered_off(val) &&
        !pcie_sltctl_powered_off(old_slt_ctl))
    {
        pcie_cap_slot_do_unplug(dev);
    }
    pcie_cap_update_power(dev);

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port's Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event. */

    /* Real hardware might take a while to complete requested command because
     * physical movement would be involved like locking the electromechanical
     * lock.  However in our case, command is completed instantaneously above,
     * so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}

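/*
 * Post-load hook: recompute the cached hot-plug notification state and
 * re-sync the power of devices behind the slot from the loaded Slot Control
 * register.
 */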
int pcie_cap_slot_post_load(void *opaque, int version_id)
{
    PCIDevice *dev = opaque;
    hotplug_event_update_event_status(dev);
    pcie_cap_update_power(dev);
    return 0;
}

void pcie_cap_slot_push_attention_button(PCIDevice *dev)
{
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_ABP);
}

/* root control/capabilities/status. PME isn't emulated for now */
void pcie_cap_root_init(PCIDevice *dev)
{
    pci_set_word(dev->wmask + dev->exp.exp_cap + PCI_EXP_RTCTL,
                 PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
                 PCI_EXP_RTCTL_SEFEE);
}

void pcie_cap_root_reset(PCIDevice *dev)
{
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL, 0);
}

/* function level reset (FLR) */
void pcie_cap_flr_init(PCIDevice *dev)
{
    pci_long_test_and_set_mask(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP,
                               PCI_EXP_DEVCAP_FLR);

    /* Although reading BCR_FLR always returns 0,
     * the bit is made writable here in order to detect when 1b is written.
     * pcie_cap_flr_write_config() test-and-clears the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + dev->exp.exp_cap + PCI_EXP_DEVCTL,
                               PCI_EXP_DEVCTL_BCR_FLR);
}

void pcie_cap_flr_write_config(PCIDevice *dev,
                               uint32_t addr, uint32_t val, int len)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    if (pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR) {
        /* Clear PCI_EXP_DEVCTL_BCR_FLR after invoking the reset handler
           so the handler can detect FLR by looking at this bit. */
        pci_device_reset(dev);
        pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR);
    }
}

/* Alternative Routing-ID Interpretation (ARI)
 * forwarding support for root and downstream ports
 */
void pcie_cap_arifwd_init(PCIDevice *dev)
{
    uint32_t pos = dev->exp.exp_cap;
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_DEVCAP2,
                               PCI_EXP_DEVCAP2_ARI);
    pci_long_test_and_set_mask(dev->wmask + pos + PCI_EXP_DEVCTL2,
                               PCI_EXP_DEVCTL2_ARI);
}

void pcie_cap_arifwd_reset(PCIDevice *dev)
{
    uint8_t *devctl2 = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2;
    pci_long_test_and_clear_mask(devctl2, PCI_EXP_DEVCTL2_ARI);
}

bool pcie_cap_is_arifwd_enabled(const PCIDevice *dev)
{
    if (!pci_is_express(dev)) {
        return false;
    }
    if (!dev->exp.exp_cap) {
        return false;
    }

    return pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
        PCI_EXP_DEVCTL2_ARI;
}

/**************************************************************************
 * pci express extended capability list management functions
 * uint16_t ext_cap_id (16 bit)
 * uint8_t cap_ver (4 bit)
 * uint16_t cap_offset (12 bit)
 * uint16_t ext_cap_size
 */

/* Passing a cap_id value > 0xffff will return 0 and put end of list in prev */
static uint16_t pcie_find_capability_list(PCIDevice *dev, uint32_t cap_id,
                                          uint16_t *prev_p)
{
    uint16_t prev = 0;
    uint16_t next;
    uint32_t header = pci_get_long(dev->config + PCI_CONFIG_SPACE_SIZE);

    if (!header) {
        /* no extended capability */
        next = 0;
        goto out;
    }
    for (next = PCI_CONFIG_SPACE_SIZE; next;
         prev = next, next = PCI_EXT_CAP_NEXT(header)) {

        assert(next >= PCI_CONFIG_SPACE_SIZE);
        assert(next <= PCIE_CONFIG_SPACE_SIZE - 8);

        header = pci_get_long(dev->config + next);
        if (PCI_EXT_CAP_ID(header) == cap_id) {
            break;
        }
    }

out:
    if (prev_p) {
        *prev_p = prev;
    }
    return next;
}

uint16_t pcie_find_capability(PCIDevice *dev, uint16_t cap_id)
{
    return pcie_find_capability_list(dev, cap_id, NULL);
}

static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
{
    uint32_t header = pci_get_long(dev->config + pos);
    assert(!(next & (PCI_EXT_CAP_ALIGN - 1)));
    header = (header & ~PCI_EXT_CAP_NEXT_MASK) |
        ((next << PCI_EXT_CAP_NEXT_SHIFT) & PCI_EXT_CAP_NEXT_MASK);
    pci_set_long(dev->config + pos, header);
}

/*
 * Caller must supply valid (offset, size) such that the range wouldn't
 * overlap with other capability or other registers.
 * This function doesn't check it.
 */
void pcie_add_capability(PCIDevice *dev,
                         uint16_t cap_id, uint8_t cap_ver,
                         uint16_t offset, uint16_t size)
{
    assert(offset >= PCI_CONFIG_SPACE_SIZE);
    assert(offset < (uint16_t)(offset + size));
    assert((uint16_t)(offset + size) <= PCIE_CONFIG_SPACE_SIZE);
    assert(pci_is_express(dev));

    if (offset != PCI_CONFIG_SPACE_SIZE) {
        uint16_t prev;

        /*
         * 0xffffffff is not a valid cap id (it's a 16 bit field). use
         * internally to find the last capability in the linked list.
         */
        pcie_find_capability_list(dev, 0xffffffff, &prev);
        assert(prev >= PCI_CONFIG_SPACE_SIZE);
        pcie_ext_cap_set_next(dev, prev, offset);
    }
    pci_set_long(dev->config + offset, PCI_EXT_CAP(cap_id, cap_ver, 0));

    /* Make capability read-only by default */
    memset(dev->wmask + offset, 0, size);
    memset(dev->w1cmask + offset, 0, size);
    /* Check capability by default */
    memset(dev->cmask + offset, 0xFF, size);
}

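/*
 * Typical usage (illustrative sketch only, mirroring pcie_dev_ser_num_init()
 * below): reserve a range in extended config space and then fill in the
 * capability body, e.g.
 *
 *     pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, 1, offset,
 *                         PCI_EXT_CAP_DSN_SIZEOF);
 *     pci_set_quad(dev->config + offset + 4, ser_num);
 */
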
/*
 * Sync the PCIe Link Status negotiated speed and width of a bridge with the
 * downstream device.  If downstream device is not present, re-write with the
 * Link Capability fields.  If downstream device reports invalid width or
 * speed, replace with minimum values (LnkSta fields are RsvdZ on VFs but such
 * values interfere with PCIe native hotplug detecting new devices).  Limit
 * width and speed to bridge capabilities for compatibility.  Use config_read
 * to access the downstream device since it could be an assigned device with
 * volatile link information.
 */
void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
{
    PCIBridge *br = PCI_BRIDGE(bridge_dev);
    PCIBus *bus = pci_bridge_get_sec_bus(br);
    PCIDevice *target = bus->devices[0];
    uint8_t *exp_cap = bridge_dev->config + bridge_dev->exp.exp_cap;
    uint16_t lnksta, lnkcap = pci_get_word(exp_cap + PCI_EXP_LNKCAP);

    if (!target || !target->exp.exp_cap) {
        lnksta = lnkcap;
    } else {
        lnksta = target->config_read(target,
                                     target->exp.exp_cap + PCI_EXP_LNKSTA,
                                     sizeof(lnksta));

        if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) {
            lnksta &= ~PCI_EXP_LNKSTA_NLW;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW;
        } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
            lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
        }

        if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) {
            lnksta &= ~PCI_EXP_LNKSTA_CLS;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS;
        } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
            lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
        }
    }

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                 PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta &
                               (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW));
}

/**************************************************************************
 * pci express extended capability helper functions
 */

void pcie_ari_init(PCIDevice *dev, uint16_t offset)
{
    uint16_t nextfn = dev->cap_present & QEMU_PCIE_ARI_NEXTFN_1 ? 1 : 0;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
                        offset, PCI_ARI_SIZEOF);
    pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
}

void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
{
    static const int pci_dsn_ver = 1;
    static const int pci_dsn_cap = 4;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_DSN, pci_dsn_ver, offset,
                        PCI_EXT_CAP_DSN_SIZEOF);
    pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num);
}

void pcie_ats_init(PCIDevice *dev, uint16_t offset, bool aligned)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
                        offset, PCI_EXT_CAP_ATS_SIZEOF);

    dev->exp.ats_cap = offset;

    /* Invalidate Queue Depth 0 */
    if (aligned) {
        pci_set_word(dev->config + offset + PCI_ATS_CAP,
                     PCI_ATS_CAP_PAGE_ALIGNED);
    }
    /* STU 0, Disabled by default */
    pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);

    pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
}

/* ACS (Access Control Services) */
void pcie_acs_init(PCIDevice *dev, uint16_t offset)
{
    bool is_downstream = pci_is_express_downstream_port(dev);
    uint16_t cap_bits = 0;

    /* For endpoints, only multifunction devs may have an ACS capability: */
    assert(is_downstream ||
           (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) ||
           PCI_FUNC(dev->devfn));

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ACS, PCI_ACS_VER, offset,
                        PCI_ACS_SIZEOF);
    dev->exp.acs_cap = offset;

    if (is_downstream) {
        /*
         * Downstream ports must implement SV, TB, RR, CR, UF, and DT (with
         * caveats on the latter four that we ignore for simplicity).
         * Endpoints may also implement a subset of ACS capabilities,
         * but these are optional if the endpoint does not support
         * peer-to-peer between functions and thus omitted here.
         */
        cap_bits = PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
                   PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT;
    }

    pci_set_word(dev->config + offset + PCI_ACS_CAP, cap_bits);
    pci_set_word(dev->wmask + offset + PCI_ACS_CTRL, cap_bits);
}

void pcie_acs_reset(PCIDevice *dev)
{
    if (dev->exp.acs_cap) {
        pci_set_word(dev->config + dev->exp.acs_cap + PCI_ACS_CTRL, 0);
    }
}