/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <device/pci_def.h>
#include <device/pci_mmio_cfg.h>
#include <device/pci_ops.h>
#include <northbridge/intel/haswell/haswell.h>
#include <northbridge/intel/haswell/vcu_mailbox.h>
#include <stdbool.h>
#include <stdint.h>
10 #define PEG_DEV(func) PCI_DEV(0, 1, func)
12 #define MAX_PEG_FUNC 3
14 static void peg_dmi_unset_and_set_mask_pcicfg(
15 volatile union pci_bank
*const bank
,
16 const uint32_t offset
,
17 const uint32_t unset_mask
,
18 const uint32_t set_mask
,
25 volatile uint32_t *const addr
= &bank
->reg32
[offset
/ sizeof(uint32_t)];
26 clrsetbits32(addr
, unset_mask
<< shift
, set_mask
<< shift
);
29 static void peg_dmi_unset_and_set_mask_common(
31 const uint32_t offset
,
37 const uint32_t unset_mask
= unset
<< shift
;
38 const uint32_t set_mask
= set
<< shift
;
40 for (uint8_t i
= 0; i
< MAX_PEG_FUNC
; i
++)
41 pci_update_config32(PEG_DEV(i
), offset
, ~unset_mask
, set_mask
);
43 dmibar_clrsetbits32(offset
, unset_mask
, set_mask
);
/*
 * Read-modify-write a register behind the VCU (Validation Control Unit)
 * mailbox: clear `unset_mask << shift`, set `set_mask << shift` at VCU MMIO
 * address `addr` via vcu_update_mmio().
 *
 * `valid` gates the operation so call sites can express PEG-only or DMI-only
 * recipe steps, matching the sibling helpers.
 *
 * NOTE(review): the `addr`/`shift`/`valid` parameters and the early-return
 * guard were reconstructed from the five-argument call sites — the extracted
 * source had these lines garbled; confirm against the upstream file.
 */
static void peg_dmi_unset_and_set_mask_vcu_mmio(
	const uint32_t addr,
	const uint32_t unset_mask,
	const uint32_t set_mask,
	const uint8_t shift,
	const bool valid)
{
	if (!valid)
		return;

	vcu_update_mmio(addr, ~(unset_mask << shift), set_mask << shift);
}
60 #define BUNDLE_STEP 0x20
62 static void *const dmibar
= (void *)(uintptr_t)CONFIG_FIXED_DMIBAR_MMIO_BASE
;
64 void peg_dmi_recipe(const bool is_peg
, const pci_devfn_t dev
)
66 const bool always
= true;
67 const bool is_dmi
= !is_peg
;
69 /* Treat DMIBAR and PEG devices the same way */
70 volatile union pci_bank
*const bank
= is_peg
? pci_map_bus(dev
) : dmibar
;
72 const size_t bundles
= (is_peg
? 8 : 2) * BUNDLE_STEP
;
74 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
) {
75 /* These are actually per-lane */
76 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0xa00 + i
, 0x1f, 0x0c, 0, always
);
77 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0xa10 + i
, 0x1f, 0x0c, 0, always
);
80 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
81 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x904 + i
, 0x1f, 0x02, 0, is_peg
);
83 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
84 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x904 + i
, 0x1f, 0x03, 5, is_peg
);
86 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
87 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x90c + i
, 0x3f, 0x09, 5, always
);
89 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
90 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x90c + i
, 0x0f, 0x05, 21, is_peg
);
92 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
93 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x910 + i
, 0x0f, 0x08, 6, is_peg
);
95 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
96 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x910 + i
, 0x0f, 0x00, 10, always
);
98 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
99 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x910 + i
, 0x07, 0x00, 18, always
);
101 peg_dmi_unset_and_set_mask_vcu_mmio(0x0c008001, 0x1f, 0x03, 25, is_peg
);
102 peg_dmi_unset_and_set_mask_vcu_mmio(0x0c0c8001, 0x3f, 0x00, 23, is_dmi
);
104 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0xc28, 0x1f, 0x13, 18, always
);
106 peg_dmi_unset_and_set_mask_common(is_peg
, 0xc38, 0x01, 0x00, 6, always
);
107 peg_dmi_unset_and_set_mask_common(is_peg
, 0x260, 0x03, 0x02, 0, always
);
109 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
110 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x900 + i
, 0x03, 0x00, 26, always
);
112 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
113 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x904 + i
, 0x03, 0x03, 10, always
);
115 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
116 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x90c + i
, 0x1f, 0x07, 25, is_peg
);
118 for (size_t i
= 0; i
< bundles
; i
+= BUNDLE_STEP
)
119 peg_dmi_unset_and_set_mask_pcicfg(bank
, 0x91c + i
, 0x07, 0x05, 27, is_peg
);