// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Aardvark PCIe controller, used on Marvell Armada
 * 3700.
 *
 * Copyright (C) 2016 Marvell
 *
 * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>

#include "../pci.h"
#include "../pci-bridge-emul.h"
/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG				0x0
#define PCIE_CORE_CMD_STATUS_REG			0x4
#define PCIE_CORE_DEV_REV_REG				0x8
#define PCIE_CORE_SSDEV_ID_REG				0x2c
#define PCIE_CORE_PCIEXP_CAP				0xc0
#define PCIE_CORE_PCIERR_CAP				0x100
#define PCIE_CORE_ERR_CAPCTL_REG			0x118
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX		BIT(5)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN		BIT(6)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK		BIT(7)
#define     PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV		BIT(8)

/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR				0x4000
#define PIO_CTRL				(PIO_BASE_ADDR + 0x0)
#define   PIO_CTRL_TYPE_MASK			GENMASK(3, 0)
#define   PIO_CTRL_ADDR_WIN_DISABLE		BIT(24)
#define PIO_STAT				(PIO_BASE_ADDR + 0x4)
#define   PIO_COMPLETION_STATUS_SHIFT		7
#define   PIO_COMPLETION_STATUS_MASK		GENMASK(9, 7)
#define     PIO_COMPLETION_STATUS_OK		0
#define     PIO_COMPLETION_STATUS_UR		1
#define     PIO_COMPLETION_STATUS_RRS		2
#define     PIO_COMPLETION_STATUS_CA		4
#define   PIO_NON_POSTED_REQ			BIT(10)
#define   PIO_ERR_STATUS			BIT(11)
#define PIO_ADDR_LS				(PIO_BASE_ADDR + 0x8)
#define PIO_ADDR_MS				(PIO_BASE_ADDR + 0xc)
#define PIO_WR_DATA				(PIO_BASE_ADDR + 0x10)
#define PIO_WR_DATA_STRB			(PIO_BASE_ADDR + 0x14)
#define PIO_RD_DATA				(PIO_BASE_ADDR + 0x18)
#define PIO_START				(PIO_BASE_ADDR + 0x1c)
#define PIO_ISR					(PIO_BASE_ADDR + 0x20)
#define PIO_ISRM				(PIO_BASE_ADDR + 0x24)
/* Aardvark Control registers */
#define CONTROL_BASE_ADDR			0x4800
#define PCIE_CORE_CTRL0_REG			(CONTROL_BASE_ADDR + 0x0)
#define     PCIE_GEN_SEL_MSK			0x3
#define     PCIE_GEN_SEL_SHIFT			0x0
#define     SPEED_GEN_1				0
#define     SPEED_GEN_2				1
#define     SPEED_GEN_3				2
#define     IS_RC_MSK				1
#define     IS_RC_SHIFT				2
#define     LANE_CNT_MSK			0x18
#define     LANE_CNT_SHIFT			0x3
#define     LANE_COUNT_1			(0 << LANE_CNT_SHIFT)
#define     LANE_COUNT_2			(1 << LANE_CNT_SHIFT)
#define     LANE_COUNT_4			(2 << LANE_CNT_SHIFT)
#define     LANE_COUNT_8			(3 << LANE_CNT_SHIFT)
#define     LINK_TRAINING_EN			BIT(6)
#define     LEGACY_INTA				BIT(28)
#define     LEGACY_INTB				BIT(29)
#define     LEGACY_INTC				BIT(30)
#define     LEGACY_INTD				BIT(31)
#define PCIE_CORE_CTRL1_REG			(CONTROL_BASE_ADDR + 0x4)
#define     HOT_RESET_GEN			BIT(0)
#define PCIE_CORE_CTRL2_REG			(CONTROL_BASE_ADDR + 0x8)
#define     PCIE_CORE_CTRL2_RESERVED		0x7
#define     PCIE_CORE_CTRL2_TD_ENABLE		BIT(4)
#define     PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE	BIT(5)
#define     PCIE_CORE_CTRL2_OB_WIN_ENABLE	BIT(6)
#define     PCIE_CORE_CTRL2_MSI_ENABLE		BIT(10)
#define PCIE_CORE_REF_CLK_REG			(CONTROL_BASE_ADDR + 0x14)
#define     PCIE_CORE_REF_CLK_TX_ENABLE		BIT(1)
#define     PCIE_CORE_REF_CLK_RX_ENABLE		BIT(2)
#define PCIE_MSG_LOG_REG			(CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG				(CONTROL_BASE_ADDR + 0x40)
#define     PCIE_MSG_PM_PME_MASK		BIT(7)
#define PCIE_ISR0_MASK_REG			(CONTROL_BASE_ADDR + 0x44)
#define     PCIE_ISR0_MSI_INT_PENDING		BIT(24)
#define     PCIE_ISR0_CORR_ERR			BIT(11)
#define     PCIE_ISR0_NFAT_ERR			BIT(12)
#define     PCIE_ISR0_FAT_ERR			BIT(13)
#define     PCIE_ISR0_ERR_MASK			GENMASK(13, 11)
#define     PCIE_ISR0_INTX_ASSERT(val)		BIT(16 + (val))
#define     PCIE_ISR0_INTX_DEASSERT(val)	BIT(20 + (val))
#define     PCIE_ISR0_ALL_MASK			GENMASK(31, 0)
#define PCIE_ISR1_REG				(CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
#define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
#define     PCIE_ISR1_FLUSH			BIT(5)
#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
#define     PCIE_ISR1_ALL_MASK			GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG			(CONTROL_BASE_ADDR + 0x5C)
#define     PCIE_MSI_ALL_MASK			GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG			(CONTROL_BASE_ADDR + 0x9C)
#define     PCIE_MSI_DATA_MASK			GENMASK(15, 0)
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR			0x4c00
#define OB_WIN_BLOCK_SIZE			0x20
#define OB_WIN_COUNT				8
#define OB_WIN_REG_ADDR(win, offset)		(OB_WIN_BASE_ADDR + \
						 OB_WIN_BLOCK_SIZE * (win) + \
						 (offset))
#define OB_WIN_MATCH_LS(win)			OB_WIN_REG_ADDR(win, 0x00)
#define     OB_WIN_ENABLE			BIT(0)
#define OB_WIN_MATCH_MS(win)			OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win)			OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win)			OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win)			OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win)			OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win)			OB_WIN_REG_ADDR(win, 0x18)
#define OB_WIN_DEFAULT_ACTIONS			(OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
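/*
 * Illustrative note on the address arithmetic above: with OB_WIN_BASE_ADDR
 * 0x4c00 and OB_WIN_BLOCK_SIZE 0x20, the last window's ACTIONS register is
 * at 0x4c00 + 7 * 0x20 + 0x18 = 0x4cf8, so OB_WIN_DEFAULT_ACTIONS resolves
 * to 0x4cfc - the register holding the Default User Field applied to
 * outbound transactions that match no configured window.
 */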
#define     OB_WIN_FUNC_NUM_MASK		GENMASK(31, 24)
#define     OB_WIN_FUNC_NUM_SHIFT		24
#define     OB_WIN_FUNC_NUM_ENABLE		BIT(23)
#define     OB_WIN_BUS_NUM_BITS_MASK		GENMASK(22, 20)
#define     OB_WIN_BUS_NUM_BITS_SHIFT		20
#define     OB_WIN_MSG_CODE_ENABLE		BIT(22)
#define     OB_WIN_MSG_CODE_MASK		GENMASK(21, 14)
#define     OB_WIN_MSG_CODE_SHIFT		14
#define     OB_WIN_MSG_PAYLOAD_LEN		BIT(12)
#define     OB_WIN_ATTR_ENABLE			BIT(11)
#define     OB_WIN_ATTR_TC_MASK			GENMASK(10, 8)
#define     OB_WIN_ATTR_TC_SHIFT		8
#define     OB_WIN_ATTR_RELAXED			BIT(7)
#define     OB_WIN_ATTR_NOSNOOP			BIT(6)
#define     OB_WIN_ATTR_POISON			BIT(5)
#define     OB_WIN_ATTR_IDO			BIT(4)
#define     OB_WIN_TYPE_MASK			GENMASK(3, 0)
#define     OB_WIN_TYPE_SHIFT			0
#define     OB_WIN_TYPE_MEM			0x0
#define     OB_WIN_TYPE_IO			0x4
#define     OB_WIN_TYPE_CONFIG_TYPE0		0x8
#define     OB_WIN_TYPE_CONFIG_TYPE1		0x9
#define     OB_WIN_TYPE_MSG			0xc
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR				0x6000
#define CFG_REG					(LMI_BASE_ADDR + 0x0)
#define     LTSSM_SHIFT				24
#define     LTSSM_MASK				0x3f
#define     RC_BAR_CONFIG			0x300
/* LTSSM values in CFG_REG */
enum {
	LTSSM_DETECT_QUIET			= 0x0,
	LTSSM_DETECT_ACTIVE			= 0x1,
	LTSSM_POLLING_ACTIVE			= 0x2,
	LTSSM_POLLING_COMPLIANCE		= 0x3,
	LTSSM_POLLING_CONFIGURATION		= 0x4,
	LTSSM_CONFIG_LINKWIDTH_START		= 0x5,
	LTSSM_CONFIG_LINKWIDTH_ACCEPT		= 0x6,
	LTSSM_CONFIG_LANENUM_ACCEPT		= 0x7,
	LTSSM_CONFIG_LANENUM_WAIT		= 0x8,
	LTSSM_CONFIG_COMPLETE			= 0x9,
	LTSSM_CONFIG_IDLE			= 0xa,
	LTSSM_RECOVERY_RCVR_LOCK		= 0xb,
	LTSSM_RECOVERY_SPEED			= 0xc,
	LTSSM_RECOVERY_RCVR_CFG			= 0xd,
	LTSSM_RECOVERY_IDLE			= 0xe,
	LTSSM_L0				= 0x10,
	LTSSM_RX_L0S_ENTRY			= 0x11,
	LTSSM_RX_L0S_IDLE			= 0x12,
	LTSSM_RX_L0S_FTS			= 0x13,
	LTSSM_TX_L0S_ENTRY			= 0x14,
	LTSSM_TX_L0S_IDLE			= 0x15,
	LTSSM_TX_L0S_FTS			= 0x16,
	LTSSM_L1_ENTRY				= 0x17,
	LTSSM_L1_IDLE				= 0x18,
	LTSSM_L2_IDLE				= 0x19,
	LTSSM_L2_TRANSMIT_WAKE			= 0x1a,
	LTSSM_DISABLED				= 0x20,
	LTSSM_LOOPBACK_ENTRY_MASTER		= 0x21,
	LTSSM_LOOPBACK_ACTIVE_MASTER		= 0x22,
	LTSSM_LOOPBACK_EXIT_MASTER		= 0x23,
	LTSSM_LOOPBACK_ENTRY_SLAVE		= 0x24,
	LTSSM_LOOPBACK_ACTIVE_SLAVE		= 0x25,
	LTSSM_LOOPBACK_EXIT_SLAVE		= 0x26,
	LTSSM_HOT_RESET				= 0x27,
	LTSSM_RECOVERY_EQUALIZATION_PHASE0	= 0x28,
	LTSSM_RECOVERY_EQUALIZATION_PHASE1	= 0x29,
	LTSSM_RECOVERY_EQUALIZATION_PHASE2	= 0x2a,
	LTSSM_RECOVERY_EQUALIZATION_PHASE3	= 0x2b,
};
#define VENDOR_ID_REG				(LMI_BASE_ADDR + 0x44)

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR			0x18000
#define CTRL_CONFIG_REG				(CTRL_CORE_BASE_ADDR + 0x0)
#define     CTRL_MODE_SHIFT			0x0
#define     CTRL_MODE_MASK			0x1
#define     PCIE_CORE_MODE_DIRECT		0x0
#define     PCIE_CORE_MODE_COMMAND		0x1

/* PCIe Central Interrupts Registers */
#define CENTRAL_INT_BASE_ADDR			0x1b000
#define HOST_CTRL_INT_STATUS_REG		(CENTRAL_INT_BASE_ADDR + 0x0)
#define HOST_CTRL_INT_MASK_REG			(CENTRAL_INT_BASE_ADDR + 0x4)
#define     PCIE_IRQ_CMDQ_INT			BIT(0)
#define     PCIE_IRQ_MSI_STATUS_INT		BIT(1)
#define     PCIE_IRQ_CMD_SENT_DONE		BIT(3)
#define     PCIE_IRQ_DMA_INT			BIT(4)
#define     PCIE_IRQ_IB_DXFERDONE		BIT(5)
#define     PCIE_IRQ_OB_DXFERDONE		BIT(6)
#define     PCIE_IRQ_OB_RXFERDONE		BIT(7)
#define     PCIE_IRQ_COMPQ_INT			BIT(12)
#define     PCIE_IRQ_DIR_RD_DDR_DET		BIT(13)
#define     PCIE_IRQ_DIR_WR_DDR_DET		BIT(14)
#define     PCIE_IRQ_CORE_INT			BIT(16)
#define     PCIE_IRQ_CORE_INT_PIO		BIT(17)
#define     PCIE_IRQ_DPMU_INT			BIT(18)
#define     PCIE_IRQ_PCIE_MIS_INT		BIT(19)
#define     PCIE_IRQ_MSI_INT1_DET		BIT(20)
#define     PCIE_IRQ_MSI_INT2_DET		BIT(21)
#define     PCIE_IRQ_RC_DBELL_DET		BIT(22)
#define     PCIE_IRQ_EP_STATUS			BIT(23)
#define     PCIE_IRQ_ALL_MASK			GENMASK(31, 0)
#define     PCIE_IRQ_ENABLE_INTS_MASK		PCIE_IRQ_CORE_INT
/* Transaction types */
#define PCIE_CONFIG_RD_TYPE0			0x8
#define PCIE_CONFIG_RD_TYPE1			0x9
#define PCIE_CONFIG_WR_TYPE0			0xa
#define PCIE_CONFIG_WR_TYPE1			0xb

#define PIO_RETRY_CNT			750000 /* 1.5 s */
#define PIO_RETRY_DELAY			2 /* 2 us */
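/*
 * Illustrative note: the 1.5 s figure above is simply the retry budget,
 * PIO_RETRY_CNT * PIO_RETRY_DELAY = 750000 * 2 us = 1.5 s, which is the
 * longest time advk_pcie_wait_pio() will poll before timing out.
 */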
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

#define RETRAIN_WAIT_MAX_RETRIES	10
#define RETRAIN_WAIT_USLEEP_US		2000

#define MSI_IRQ_NUM			32

#define CFG_RD_RRS_VAL			0xffff0001
struct advk_pcie {
	struct platform_device *pdev;
	void __iomem *base;
	struct {
		phys_addr_t match;
		phys_addr_t remap;
		phys_addr_t mask;
		u32 actions;
	} wins[OB_WIN_COUNT];
	u8 wins_count;
	struct irq_domain *rp_irq_domain;
	struct irq_domain *irq_domain;
	struct irq_chip irq_chip;
	raw_spinlock_t irq_lock;
	struct irq_domain *msi_domain;
	struct irq_domain *msi_inner_domain;
	raw_spinlock_t msi_irq_lock;
	DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
	struct mutex msi_used_lock;
	int link_gen;
	struct pci_bridge_emul bridge;
	struct gpio_desc *reset_gpio;
	struct phy *phy;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
{
	writel(val, pcie->base + reg);
}

static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
{
	return readl(pcie->base + reg);
}
static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
	u32 val;
	u8 ltssm_state;

	val = advk_readl(pcie, CFG_REG);
	ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
	return ltssm_state;
}
static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
	/* check if LTSSM is in normal operation - some L* state */
	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
	return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}
static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
{
	/*
	 * According to PCIe Base specification 3.0, Table 4-14 (Link
	 * Status Mapped to the LTSSM) and 4.2.6.3.6 (Configuration.Idle),
	 * Link Up maps to the LTSSM Configuration.Idle, Recovery, L0,
	 * L0s, L1 and L2 states. And according to 3.2.1 (Data Link
	 * Control and Management State Machine Rules), DL Up status is
	 * reported in the DL Active state.
	 */
	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
	return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
}
static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
	/*
	 * According to PCIe Base specification 3.0, Table 4-14 (Link
	 * Status Mapped to the LTSSM), Link Training maps to the LTSSM
	 * Configuration and Recovery states.
	 */
	u8 ltssm_state = advk_pcie_ltssm_state(pcie);
	return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
		 ltssm_state < LTSSM_L0) ||
		(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
		 ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}
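/*
 * Summary of the LTSSM-derived predicates above (derived from the checks
 * themselves): link_up covers [LTSSM_L0, LTSSM_DISABLED), link_active
 * covers [LTSSM_CONFIG_IDLE, LTSSM_DISABLED), and link_training covers
 * [LTSSM_CONFIG_LINKWIDTH_START, LTSSM_L0) plus the Recovery Equalization
 * phases.
 */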
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (advk_pcie_link_up(pcie))
			return 0;

		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}
static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
{
	size_t retries;

	for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
		if (advk_pcie_link_training(pcie))
			break;
		udelay(RETRAIN_WAIT_USLEEP_US);
	}
}
static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
	if (!pcie->reset_gpio)
		return;

	/* 10ms delay is needed for some cards */
	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
	gpiod_set_value_cansleep(pcie->reset_gpio, 1);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
static void advk_pcie_train_link(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	int ret;

	/*
	 * Setup PCIe rev / gen compliance based on device tree property
	 * 'max-link-speed' which also forces maximal link speed.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~PCIE_GEN_SEL_MSK;
	if (pcie->link_gen == 3)
		reg |= SPEED_GEN_3;
	else if (pcie->link_gen == 2)
		reg |= SPEED_GEN_2;
	else
		reg |= SPEED_GEN_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Set maximal link speed value also into PCIe Link Control 2 register.
	 * Armada 3700 Functional Specification says that default value is based
	 * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
	 */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
	reg &= ~PCI_EXP_LNKCTL2_TLS;
	if (pcie->link_gen == 3)
		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
	else if (pcie->link_gen == 2)
		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
	else
		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

	/* Enable link training after selecting PCIe generation */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= LINK_TRAINING_EN;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Reset PCIe card via PERST# signal. Some cards are not detected
	 * during link training when they are in some non-initial state.
	 */
	advk_pcie_issue_perst(pcie);

	/*
	 * PERST# signal could have been asserted by pinctrl subsystem before
	 * probe() callback has been called or issued explicitly by reset gpio
	 * function advk_pcie_issue_perst(), making the endpoint go into
	 * fundamental reset. As required by PCI Express spec (PCI Express
	 * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
	 * Conventional Reset), a delay of at least 100ms is needed after such
	 * a reset before sending a Configuration Request to the device.
	 * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
	 * waits for link at least 900ms.
	 */
	ret = advk_pcie_wait_for_link(pcie);
	if (ret < 0)
		dev_err(dev, "link never came up\n");
	else
		dev_info(dev, "link up\n");
}
/*
 * Set PCIe address window register which could be used for memory
 * mapping.
 */
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
				 phys_addr_t match, phys_addr_t remap,
				 phys_addr_t mask, u32 actions)
{
	advk_writel(pcie, OB_WIN_ENABLE |
			  lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}
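/*
 * Example (hypothetical values, for illustration only): mapping a 16 MiB
 * MMIO range at 0xe8000000 one-to-one through window 0 would be
 *
 *	advk_pcie_set_ob_win(pcie, 0, 0xe8000000, 0xe8000000,
 *			     ~(phys_addr_t)(SZ_16M - 1), OB_WIN_TYPE_MEM);
 *
 * The mask must span a power-of-two size and match/remap must be aligned
 * to it, which is exactly what the window-sizing loop in advk_pcie_probe()
 * guarantees.
 */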
static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
	advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
	advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
	advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
	phys_addr_t msi_addr;
	u32 reg;
	int i;

	/*
	 * Configure PCIe Reference clock. Direction is from the PCIe
	 * controller to the endpoint card, so enable transmitting of
	 * Reference clock differential signal off-chip and disable
	 * receiving off-chip differential signal.
	 */
	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
	reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
	reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);

	/* Set to Direct mode */
	reg = advk_readl(pcie, CTRL_CONFIG_REG);
	reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
	reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
	advk_writel(pcie, reg, CTRL_CONFIG_REG);

	/* Set PCI global control register to RC mode */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg |= (IS_RC_MSK << IS_RC_SHIFT);
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/*
	 * Replace incorrect PCI vendor id value 0x1b4b by correct value 0x11ab.
	 * VENDOR_ID_REG contains vendor id in low 16 bits and subsystem vendor
	 * id in high 16 bits. Updating this register changes readback value of
	 * read-only vendor id bits in PCIE_CORE_DEV_ID_REG register. Workaround
	 * for erratum 4.1: "The value of device and vendor ID is incorrect".
	 */
	reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
	advk_writel(pcie, reg, VENDOR_ID_REG);

	/*
	 * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
	 * because the default value is Mass storage controller (0x010400).
	 *
	 * Note that this Aardvark PCI Bridge does not have compliant Type 1
	 * Configuration Space and it even cannot be accessed via Aardvark's
	 * PCI config space access method. Something like config space is
	 * available in internal Aardvark registers starting at offset 0x0
	 * and is reported as Type 0. In range 0x10 - 0x34 it has totally
	 * different registers.
	 *
	 * Therefore the driver uses emulation of the PCI Bridge which emulates
	 * access to configuration space via internal Aardvark registers or
	 * an emulated configuration buffer.
	 */
	reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
	reg &= ~0xffffff00;
	reg |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
	advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);

	/* Set Advanced Error Capabilities and Control PF0 register */
	reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
		PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
	advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

	/* Set PCIe Device Control register */
	reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
	reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
	reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
	reg &= ~PCI_EXP_DEVCTL_READRQ;
	reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
	reg |= PCI_EXP_DEVCTL_READRQ_512B;
	advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

	/* Program PCIe Control 2 to disable strict ordering */
	reg = PCIE_CORE_CTRL2_RESERVED |
		PCIE_CORE_CTRL2_TD_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Set lane X1 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	reg &= ~LANE_CNT_MSK;
	reg |= LANE_COUNT_1;
	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

	/* Set MSI address */
	msi_addr = virt_to_phys(pcie);
	advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG);
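	/*
	 * Illustrative note (assumption based on how the registers are used
	 * here and in advk_msi_irq_compose_msi_msg()): the MSI doorbell is
	 * only a match address for the controller's inbound-write decoder,
	 * so any unique physical address works; the physical address of the
	 * driver's own private structure is reused as such a unique address.
	 */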
	/* Enable MSI */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Disable All ISR0/1 and MSI Sources */
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);

	/* Unmask summary MSI interrupt */
	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	reg &= ~PCIE_ISR0_MSI_INT_PENDING;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	/* Unmask PME interrupt for processing of PME requester */
	reg = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	reg &= ~PCIE_MSG_PM_PME_MASK;
	advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);

	/* Enable summary interrupt for GIC SPI source */
	reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
	advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

	/*
	 * Enable AXI address window location generation:
	 * When it is enabled, the default outbound window
	 * configurations (Default User Field: 0xD0074CFC)
	 * are used for transparent address translation of
	 * the outbound transactions. Thus, PCIe address
	 * windows are not required for transparent memory
	 * access when the default outbound window configuration
	 * is set for memory access.
	 */
	reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
	advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

	/*
	 * Set memory access in Default User Field so it
	 * is not required to configure PCIe address for
	 * transparent memory access.
	 */
	advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

	/*
	 * Bypass the address window mapping for PIO:
	 * Since PIO access already contains all required
	 * info over the AXI interface by PIO registers, the
	 * address window is not required.
	 */
	reg = advk_readl(pcie, PIO_CTRL);
	reg |= PIO_CTRL_ADDR_WIN_DISABLE;
	advk_writel(pcie, reg, PIO_CTRL);

	/*
	 * Configure PCIe address windows for non-memory or
	 * non-transparent access as by default PCIe uses
	 * transparent memory access.
	 */
	for (i = 0; i < pcie->wins_count; i++)
		advk_pcie_set_ob_win(pcie, i,
				     pcie->wins[i].match, pcie->wins[i].remap,
				     pcie->wins[i].mask, pcie->wins[i].actions);

	/* Disable remaining PCIe outbound windows */
	for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	advk_pcie_train_link(pcie);
}
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
	struct device *dev = &pcie->pdev->dev;
	u32 reg;
	unsigned int status;
	char *strcomp_status, *str_posted;
	int ret;

	reg = advk_readl(pcie, PIO_STAT);
	status = (reg & PIO_COMPLETION_STATUS_MASK) >>
		PIO_COMPLETION_STATUS_SHIFT;

	/*
	 * According to HW spec, the PIO status check sequence is as below:
	 * 1) even if COMPLETION_STATUS (bit9:7) indicates success, Error
	 *    Status (bit11) still needs to be checked; only when this bit
	 *    indicates that no error happened is the operation successful.
	 * 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
	 *    means a PIO write error, and for PIO read it is successful with
	 *    a read value of 0xFFFFFFFF.
	 * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
	 *    only means a PIO write error, and for PIO read it is successful
	 *    with a read value of 0xFFFF0001.
	 * 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
	 *    error for both PIO read and PIO write operation.
	 * 5) other errors are indicated as 'unknown'.
	 */
	switch (status) {
	case PIO_COMPLETION_STATUS_OK:
		if (reg & PIO_ERR_STATUS) {
			strcomp_status = "COMP_ERR";
			ret = -EFAULT;
			break;
		}
		/* Get the read result */
		if (val)
			*val = advk_readl(pcie, PIO_RD_DATA);
		/* No error */
		strcomp_status = NULL;
		ret = 0;
		break;
	case PIO_COMPLETION_STATUS_UR:
		strcomp_status = "UR";
		ret = -EOPNOTSUPP;
		break;
	case PIO_COMPLETION_STATUS_RRS:
		if (allow_rrs && val) {
			/* PCIe r6.0, sec 2.3.2, says:
			 * If Configuration RRS Software Visibility is enabled:
			 * For a Configuration Read Request that includes both
			 * bytes of the Vendor ID field of a device Function's
			 * Configuration Space Header, the Root Complex must
			 * complete the Request to the host by returning a
			 * read-data value of 0001h for the Vendor ID field and
			 * all '1's for any additional bytes included in the
			 * request.
			 *
			 * So RRS in this case is not an error status.
			 */
			*val = CFG_RD_RRS_VAL;
			strcomp_status = NULL;
			ret = 0;
			break;
		}
		/* PCIe r6.0, sec 2.3.2, says:
		 * If RRS Software Visibility is not enabled, the Root Complex
		 * must re-issue the Configuration Request as a new Request.
		 * If RRS Software Visibility is enabled: For a Configuration
		 * Write Request or for any other Configuration Read Request,
		 * the Root Complex must re-issue the Configuration Request as
		 * a new Request.
		 * A Root Complex implementation may choose to limit the number
		 * of Configuration Request/RRS Completion Status loops before
		 * determining that something is wrong with the target of the
		 * Request and taking appropriate action, e.g., complete the
		 * Request to the host as a failed transaction.
		 *
		 * So return -EAGAIN and the caller (pci-aardvark.c driver) will
		 * re-issue the request again, up to PIO_RETRY_CNT retries.
		 */
		strcomp_status = "RRS";
		ret = -EAGAIN;
		break;
	case PIO_COMPLETION_STATUS_CA:
		strcomp_status = "CA";
		ret = -ECANCELED;
		break;
	default:
		strcomp_status = "Unknown";
		ret = -EINVAL;
		break;
	}

	if (!strcomp_status)
		return ret;

	if (reg & PIO_NON_POSTED_REQ)
		str_posted = "Non-posted";
	else
		str_posted = "Posted";

	dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
		str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));

	return ret;
}
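/*
 * Note (derived from the switch above): advk_pcie_check_pio_status() returns
 * 0 on success, -EAGAIN when the completion was RRS and the caller should
 * re-issue the request, and another negative errno for fatal completions.
 */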
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	int i;

	for (i = 1; i <= PIO_RETRY_CNT; i++) {
		u32 start, isr;

		start = advk_readl(pcie, PIO_START);
		isr = advk_readl(pcie, PIO_ISR);
		if (!start && isr)
			return i;
		udelay(PIO_RETRY_DELAY);
	}

	dev_err(dev, "PIO read/write transfer time out\n");
	return -ETIMEDOUT;
}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
		return PCI_BRIDGE_EMUL_HANDLED;

	case PCI_INTERRUPT_LINE: {
		/*
		 * From the whole 32bit register we support reading from HW only
		 * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR.
		 * Other bits are retrieved only from the emulated config buffer.
		 */
		__le32 *cfgspace = (__le32 *)&bridge->conf;
		u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
		if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK)
			val &= ~(PCI_BRIDGE_CTL_SERR << 16);
		else
			val |= PCI_BRIDGE_CTL_SERR << 16;
		if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
			val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
		else
			val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}
static void
advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_COMMAND:
		advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
		break;

	case PCI_INTERRUPT_LINE:
		/*
		 * According to Figure 6-3: Pseudo Logic Diagram for Error
		 * Message Controls in the PCIe base specification, the SERR#
		 * Enable bit in the Bridge Control register enables receiving
		 * of ERR_* messages.
		 */
		if (mask & (PCI_BRIDGE_CTL_SERR << 16)) {
			u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
			if (new & (PCI_BRIDGE_CTL_SERR << 16))
				val &= ~PCIE_ISR0_ERR_MASK;
			else
				val |= PCIE_ISR0_ERR_MASK;
			advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
		}
		if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
			u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
			if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
				val |= HOT_RESET_GEN;
			else
				val &= ~HOT_RESET_GEN;
			advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
		}
		break;

	default:
		break;
	}
}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				    int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	/*
	 * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
	 * also supported, but do not need to be handled here, because their
	 * values are stored in the emulated config space buffer, and we read
	 * them from there when needed.
	 */

	case PCI_EXP_LNKCAP: {
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		/*
		 * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
		 * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
		 * state, so explicitly enable the PCI_EXP_LNKCAP_DLLLARC flag.
		 */
		val |= PCI_EXP_LNKCAP_DLLLARC;
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_LNKCTL: {
		/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
		u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
			~(PCI_EXP_LNKSTA_LT << 16);
		if (advk_pcie_link_training(pcie))
			val |= (PCI_EXP_LNKSTA_LT << 16);
		if (advk_pcie_link_active(pcie))
			val |= (PCI_EXP_LNKSTA_DLLLA << 16);
		*value = val;
		return PCI_BRIDGE_EMUL_HANDLED;
	}

	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
		*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}
static void
advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				     int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
	case PCI_EXP_LNKCTL:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		if (new & PCI_EXP_LNKCTL_RL)
			advk_pcie_wait_for_retrain(pcie);
		break;

	case PCI_EXP_RTCTL: {
		u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
		/* Only emulation of PMEIE and RRS_SVE bits is provided */
		rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
		bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
		break;
	}

	/*
	 * PCI_EXP_RTSTA is also supported, but does not need to be handled
	 * here, because its value is stored in the emulated config space
	 * buffer, and we write it there when needed.
	 */

	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCTL2:
		advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
		break;

	default:
		break;
	}
}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
				   int reg, u32 *value)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	case 0:
		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);

		/*
		 * PCI_EXT_CAP_NEXT bits are set to offset 0x150, but Armada
		 * 3700 Functional Specification does not document registers
		 * at those addresses.
		 *
		 * Thus we clear PCI_EXT_CAP_NEXT bits to make the Advanced
		 * Error Reporting Capability header the last Extended
		 * Capability. If we obtain documentation for those registers
		 * in the future, this can be changed.
		 */
		*value &= 0x000fffff;
		return PCI_BRIDGE_EMUL_HANDLED;

	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG + 0:
	case PCI_ERR_HEADER_LOG + 4:
	case PCI_ERR_HEADER_LOG + 8:
	case PCI_ERR_HEADER_LOG + 12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_STATUS:
	case PCI_ERR_ROOT_ERR_SRC:
		*value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
		return PCI_BRIDGE_EMUL_HANDLED;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}
}
static void
advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
				    int reg, u32 old, u32 new, u32 mask)
{
	struct advk_pcie *pcie = bridge->data;

	switch (reg) {
	/* These are W1C registers, so clear other bits */
	case PCI_ERR_UNCOR_STATUS:
	case PCI_ERR_COR_STATUS:
	case PCI_ERR_ROOT_STATUS:
		new &= mask;
		fallthrough;

	case PCI_ERR_UNCOR_MASK:
	case PCI_ERR_UNCOR_SEVER:
	case PCI_ERR_COR_MASK:
	case PCI_ERR_CAP:
	case PCI_ERR_HEADER_LOG + 0:
	case PCI_ERR_HEADER_LOG + 4:
	case PCI_ERR_HEADER_LOG + 8:
	case PCI_ERR_HEADER_LOG + 12:
	case PCI_ERR_ROOT_COMMAND:
	case PCI_ERR_ROOT_ERR_SRC:
		advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
		break;

	default:
		break;
	}
}
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
	.read_base	= advk_pci_bridge_emul_base_conf_read,
	.write_base	= advk_pci_bridge_emul_base_conf_write,
	.read_pcie	= advk_pci_bridge_emul_pcie_conf_read,
	.write_pcie	= advk_pci_bridge_emul_pcie_conf_write,
	.read_ext	= advk_pci_bridge_emul_ext_conf_read,
	.write_ext	= advk_pci_bridge_emul_ext_conf_write,
};
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
	struct pci_bridge_emul *bridge = &pcie->bridge;

	bridge->conf.vendor =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
	bridge->conf.device =
		cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
	bridge->conf.class_revision =
		cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);

	/* Support 32 bits I/O addressing */
	bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
	bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;

	/* Support 64 bits memory pref */
	bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
	bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);

	/* Support interrupt A for MSI feature */
	bridge->conf.intpin = PCI_INTERRUPT_INTA;

	/*
	 * Aardvark HW provides PCIe Capability structure in version 2 and
	 * indicates slot support, which is emulated.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);

	/*
	 * Set Presence Detect State bit permanently since there is no support
	 * for unplugging the card nor detecting whether it is plugged. (If a
	 * platform exists in the future that supports it, via a GPIO for
	 * example, it should be implemented via this bit.)
	 *
	 * Set physical slot number to 1 since there is only one port and zero
	 * value is reserved for ports within the same silicon as the Root
	 * Port, which is not our case.
	 */
	bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
							   1));
	bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);

	/* Indicates support for Completion Retry Status */
	bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);

	bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
	bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
	bridge->has_pcie = true;
	bridge->pcie_start = PCIE_CORE_PCIEXP_CAP;
	bridge->data = pcie;
	bridge->ops = &advk_pci_bridge_emul_ops;

	return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
				   int devfn)
{
	if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0)
		return false;

	/*
	 * If the link goes down after we check for link-up, we have a problem:
	 * if a PIO request is executed while link-down, the whole controller
	 * gets stuck in a non-functional state, and even after link comes up
	 * again, PIO requests won't work anymore, and a reset of the whole PCIe
	 * controller is needed. Therefore we need to prevent sending PIO
	 * requests while the link is down.
	 */
	if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie))
		return false;

	return true;
}
static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	/*
	 * Trying to start a new PIO transfer when the previous one has not
	 * completed causes an External Abort on the CPU, which results in a
	 * kernel panic:
	 *
	 *	SError Interrupt on CPU0, code 0xbf000002 -- SError
	 *	Kernel panic - not syncing: Asynchronous SError Interrupt
	 *
	 * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
	 * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
	 * concurrent calls at the same time. But because a PIO transfer may
	 * take about 1.5s when the link is down or the card is disconnected,
	 * advk_pcie_wait_pio() does not always have to wait for completion.
	 *
	 * Some versions of ARM Trusted Firmware handle this External Abort at
	 * EL3 level and mask it to prevent the kernel panic. Relevant TF-A commit:
	 * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
	 */
	if (advk_readl(pcie, PIO_START)) {
		dev_err(dev, "Previous PIO read/write transfer is still running\n");
		return true;
	}

	return false;
}
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 *val)
{
	struct advk_pcie *pcie = bus->sysdata;
	int retry_count;
	bool allow_rrs;
	u32 reg;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_read(&pcie->bridge, where,
						 size, val);

	/*
	 * Configuration Request Retry Status (RRS) can be returned only when
	 * reading both bytes from PCI_VENDOR_ID at once and the RRS_SVE flag
	 * on the Root Port is enabled.
	 */
	allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
		    (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
		     PCI_EXP_RTCTL_RRS_SVE);

	if (advk_pcie_pio_is_running(pcie))
		goto try_rrs;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_RD_TYPE0;
	else
		reg |= PCIE_CONFIG_RD_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Program the data strobe */
	advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);

	retry_count = 0;
	do {
		/* Clear PIO DONE ISR and start the transfer */
		advk_writel(pcie, 1, PIO_ISR);
		advk_writel(pcie, 1, PIO_START);

		ret = advk_pcie_wait_pio(pcie);
		if (ret < 0)
			goto try_rrs;

		retry_count += ret;

		/* Check PIO status and get the read result */
		ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

	if (ret < 0)
		goto fail;

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;
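	/*
	 * Worked example (illustrative): a 1-byte read at where == 0x0e of a
	 * dword that came back as 0x12345678 shifts right by
	 * 8 * (0x0e & 3) == 16 bits and masks with 0xff, yielding 0x34.
	 */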
	return PCIBIOS_SUCCESSFUL;

try_rrs:
	/*
	 * If it is possible, return Configuration Request Retry Status so
	 * that the caller tries to issue the request again instead of failing.
	 */
	if (allow_rrs) {
		*val = CFG_RD_RRS_VAL;
		return PCIBIOS_SUCCESSFUL;
	}

fail:
	*val = 0xffffffff;
	return PCIBIOS_SET_FAILED;
}
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			     int where, int size, u32 val)
{
	struct advk_pcie *pcie = bus->sysdata;
	u32 reg;
	u32 data_strobe = 0x0;
	int retry_count;
	int offset;
	int ret;

	if (!advk_pcie_valid_device(pcie, bus, devfn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return pci_bridge_emul_conf_write(&pcie->bridge, where,
						  size, val);

	if (where % size)
		return PCIBIOS_SET_FAILED;

	if (advk_pcie_pio_is_running(pcie))
		return PCIBIOS_SET_FAILED;

	/* Program the control register */
	reg = advk_readl(pcie, PIO_CTRL);
	reg &= ~PIO_CTRL_TYPE_MASK;
	if (pci_is_root_bus(bus->parent))
		reg |= PCIE_CONFIG_WR_TYPE0;
	else
		reg |= PCIE_CONFIG_WR_TYPE1;
	advk_writel(pcie, reg, PIO_CTRL);

	/* Program the address registers */
	reg = ALIGN_DOWN(PCIE_ECAM_OFFSET(bus->number, devfn, where), 4);
	advk_writel(pcie, reg, PIO_ADDR_LS);
	advk_writel(pcie, 0, PIO_ADDR_MS);

	/* Calculate the write strobe */
	offset = where & 0x3;
	reg = val << (8 * offset);
	data_strobe = GENMASK(size - 1, 0) << offset;

	/* Program the data register */
	advk_writel(pcie, reg, PIO_WR_DATA);

	/* Program the data strobe */
	advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);

	retry_count = 0;
	do {
		/* Clear PIO DONE ISR and start the transfer */
		advk_writel(pcie, 1, PIO_ISR);
		advk_writel(pcie, 1, PIO_START);

		ret = advk_pcie_wait_pio(pcie);
		if (ret < 0)
			return PCIBIOS_SET_FAILED;

		retry_count += ret;

		ret = advk_pcie_check_pio_status(pcie, false, NULL);
	} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);

	return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}
static struct pci_ops advk_pcie_ops = {
	.read = advk_pcie_rd_conf,
	.write = advk_pcie_wr_conf,
};
static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
					 struct msi_msg *msg)
{
	struct advk_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = virt_to_phys(pcie);

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}
static void advk_msi_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	mask |= BIT(hwirq);
	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
static void advk_msi_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags);
	mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	mask &= ~BIT(hwirq);
	advk_writel(pcie, mask, PCIE_MSI_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
static void advk_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void advk_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
static struct irq_chip advk_msi_bottom_irq_chip = {
	.name			= "MSI",
	.irq_compose_msi_msg	= advk_msi_irq_compose_msi_msg,
	.irq_mask		= advk_msi_irq_mask,
	.irq_unmask		= advk_msi_irq_unmask,
};
static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
				     unsigned int virq,
				     unsigned int nr_irqs, void *args)
{
	struct advk_pcie *pcie = domain->host_data;
	int hwirq, i;

	mutex_lock(&pcie->msi_used_lock);
	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM,
					order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
	if (hwirq < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &advk_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);

	return 0;
}
static void advk_msi_irq_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct advk_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->msi_used_lock);
	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&pcie->msi_used_lock);
}

static const struct irq_domain_ops advk_msi_domain_ops = {
	.alloc = advk_msi_irq_domain_alloc,
	.free = advk_msi_irq_domain_free,
};
static void advk_pcie_irq_mask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void advk_pcie_irq_unmask(struct irq_data *d)
{
	struct advk_pcie *pcie = d->domain->host_data;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	u32 mask;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static int advk_pcie_irq_map(struct irq_domain *h,
			     unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pcie->irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
	.map = advk_pcie_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
static struct irq_chip advk_msi_irq_chip = {
	.name		= "advk-MSI",
	.irq_mask	= advk_msi_top_irq_mask,
	.irq_unmask	= advk_msi_top_irq_unmask,
};

static struct msi_domain_info advk_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &advk_msi_irq_chip,
};
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;

	raw_spin_lock_init(&pcie->msi_irq_lock);
	mutex_init(&pcie->msi_used_lock);

	pcie->msi_inner_domain =
		irq_domain_add_linear(NULL, MSI_IRQ_NUM,
				      &advk_msi_domain_ops, pcie);
	if (!pcie->msi_inner_domain)
		return -ENOMEM;

	pcie->msi_domain =
		pci_msi_create_irq_domain(dev_fwnode(dev),
					  &advk_msi_domain_info,
					  pcie->msi_inner_domain);
	if (!pcie->msi_domain) {
		irq_domain_remove(pcie->msi_inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->msi_domain);
	irq_domain_remove(pcie->msi_inner_domain);
}
static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	struct irq_chip *irq_chip;
	int ret = 0;

	raw_spin_lock_init(&pcie->irq_lock);

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	irq_chip = &pcie->irq_chip;

	irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
					dev_name(dev));
	if (!irq_chip->name) {
		ret = -ENOMEM;
		goto out_put_node;
	}

	irq_chip->irq_mask = advk_pcie_irq_mask;
	irq_chip->irq_unmask = advk_pcie_irq_unmask;

	pcie->irq_domain =
		irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
				      &advk_pcie_irq_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		ret = -ENOMEM;
		goto out_put_node;
	}

out_put_node:
	of_node_put(pcie_intc_node);
	return ret;
}
static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->irq_domain);
}
static struct irq_chip advk_rp_irq_chip = {
	.name = "advk-RP",
};

static int advk_pcie_rp_irq_map(struct irq_domain *h,
				unsigned int virq, irq_hw_number_t hwirq)
{
	struct advk_pcie *pcie = h->host_data;

	irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq);
	irq_set_chip_data(virq, pcie);

	return 0;
}

static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
	.map = advk_pcie_rp_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
	pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
						    &advk_pcie_rp_irq_domain_ops,
						    pcie);
	if (!pcie->rp_irq_domain) {
		dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}

static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie)
{
	irq_domain_remove(pcie->rp_irq_domain);
}
static void advk_pcie_handle_pme(struct advk_pcie *pcie)
{
	u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16;

	advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG);

	/*
	 * PCIE_MSG_LOG_REG contains the last inbound message, so store
	 * the requester ID only when PME was not asserted yet.
	 * Also do not trigger PME interrupt when PME is still asserted.
	 */
	if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) {
		pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME);

		/*
		 * Trigger PME interrupt only if PMEIE bit in Root Control is set.
		 * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0.
		 */
		if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE))
			return;

		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n");
	}
}
static void advk_pcie_handle_msi(struct advk_pcie *pcie)
{
	u32 msi_val, msi_mask, msi_status, msi_idx;

	msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
	msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
	msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);

	for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
		if (!(BIT(msi_idx) & msi_status))
			continue;

		advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
		if (generic_handle_domain_irq(pcie->msi_inner_domain, msi_idx) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected MSI 0x%02x\n", msi_idx);
	}

	advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
		    PCIE_ISR0_REG);
}
static void advk_pcie_handle_int(struct advk_pcie *pcie)
{
	u32 isr0_val, isr0_mask, isr0_status;
	u32 isr1_val, isr1_mask, isr1_status;
	int i;

	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);

	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);

	/* Process the PME interrupt first so as not to miss the PME requester id */
	if (isr0_status & PCIE_MSG_PM_PME_MASK)
		advk_pcie_handle_pme(pcie);

	/* Process ERR interrupt */
	if (isr0_status & PCIE_ISR0_ERR_MASK) {
		advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG);

		/*
		 * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use
		 * PCIe interrupt 0
		 */
		if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n");
	}

	/* Process MSI interrupts */
	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
		advk_pcie_handle_msi(pcie);

	/* Process legacy interrupts */
	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
			continue;

		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
			    PCIE_ISR1_REG);

		if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL)
			dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n",
					    (char)i + 'A');
	}
}
static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
{
	struct advk_pcie *pcie = arg;
	u32 status;

	status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
	if (!(status & PCIE_IRQ_CORE_INT))
		return IRQ_NONE;

	advk_pcie_handle_int(pcie);

	/* Clear interrupt */
	advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);

	return IRQ_HANDLED;
}
static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct advk_pcie *pcie = dev->bus->sysdata;

	/*
	 * The emulated root bridge has its own emulated irq chip and irq domain.
	 * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and
	 * hwirq for irq_create_mapping() is indexed from zero.
	 */
	if (pci_is_root_bus(dev->bus))
		return irq_create_mapping(pcie->rp_irq_domain, pin - 1);
	else
		return of_irq_parse_and_map_pci(dev, slot, pin);
}
static void advk_pcie_disable_phy(struct advk_pcie *pcie)
{
	phy_power_off(pcie->phy);
	phy_exit(pcie->phy);
}
static int advk_pcie_enable_phy(struct advk_pcie *pcie)
{
	int ret;

	if (!pcie->phy)
		return 0;

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	ret = phy_power_on(pcie->phy);
	if (ret) {
		phy_exit(pcie->phy);
		return ret;
	}

	return 0;
}
static int advk_pcie_setup_phy(struct advk_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	int ret = 0;

	pcie->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
		return PTR_ERR(pcie->phy);

	/* Old bindings miss the PHY handle */
	if (IS_ERR(pcie->phy)) {
		dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
		pcie->phy = NULL;
		return 0;
	}

	ret = advk_pcie_enable_phy(pcie);
	if (ret)
		dev_err(dev, "Failed to initialize PHY (%d)\n", ret);

	return ret;
}
static int advk_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct advk_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	int ret, irq;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	resource_list_for_each_entry(entry, &bridge->windows) {
		resource_size_t start = entry->res->start;
		resource_size_t size = resource_size(entry->res);
		unsigned long type = resource_type(entry->res);
		u64 win_size;

		/*
		 * Aardvark hardware allows configuring a PCIe window also
		 * for config type 0 and type 1 mapping, but the driver uses
		 * only PIO for issuing configuration transfers, which does
		 * not use PCIe window configuration.
		 */
		if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
			continue;

		/*
		 * Skip transparent memory resources. Default outbound access
		 * configuration is set to transparent memory access so it
		 * does not need window configuration.
		 */
		if (type == IORESOURCE_MEM && entry->offset == 0)
			continue;

		/*
		 * The n-th PCIe window is configured by tuple (match, remap, mask)
		 * and an access to address A uses this window if A matches the
		 * match with given mask.
		 * So every PCIe window size must be a power of two and every start
		 * address must be aligned to window size. Minimal size is 64 KiB
		 * because lower 16 bits of mask must be zero. Remapped address
		 * may have set only bits from the mask.
		 */
		while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
			/* Calculate the largest aligned window size */
			win_size = (1ULL << (fls64(size)-1)) |
				   (start ? (1ULL << __ffs64(start)) : 0);
			win_size = 1ULL << __ffs64(win_size);
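			/*
			 * Worked example (illustrative): for start 0xe8000000
			 * and size 0x1300000, fls64(size)-1 == 24 and
			 * __ffs64(start) == 27, so the OR is 0x9000000 and
			 * the final win_size is 1 << 24 == 16 MiB: the
			 * largest power-of-two chunk that both fits in the
			 * remaining size and keeps the start address aligned.
			 */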
			if (win_size < 0x10000)
				break;

			dev_dbg(dev,
				"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
				pcie->wins_count, (unsigned long long)start,
				(unsigned long long)start + win_size, type);

			if (type == IORESOURCE_IO) {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
				pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
			} else {
				pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
				pcie->wins[pcie->wins_count].match = start;
			}
			pcie->wins[pcie->wins_count].remap = start - entry->offset;
			pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

			if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
				break;

			start += win_size;
			size -= win_size;
			pcie->wins_count++;
		}

		if (size > 0) {
			dev_err(&pcie->pdev->dev,
				"Invalid PCIe region [0x%llx-0x%llx]\n",
				(unsigned long long)entry->res->start,
				(unsigned long long)entry->res->end + 1);
			return -EINVAL;
		}
	}

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
			       pcie);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get reset-gpio: %i\n", ret);
		return ret;
	}

	ret = gpiod_set_consumer_name(pcie->reset_gpio, "pcie1-reset");
	if (ret) {
		dev_err(dev, "Failed to set reset gpio name: %d\n", ret);
		return ret;
	}

	ret = of_pci_get_max_link_speed(dev->of_node);
	if (ret <= 0 || ret > 3)
		pcie->link_gen = 3;
	else
		pcie->link_gen = ret;

	ret = advk_pcie_setup_phy(pcie);
	if (ret)
		return ret;

	advk_pcie_setup_hw(pcie);

	ret = advk_sw_pci_bridge_init(pcie);
	if (ret) {
		dev_err(dev, "Failed to register emulated root PCI bridge\n");
		return ret;
	}

	ret = advk_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		return ret;
	}

	ret = advk_pcie_init_msi_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	ret = advk_pcie_init_rp_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed to initialize irq\n");
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	bridge->sysdata = pcie;
	bridge->ops = &advk_pcie_ops;
	bridge->map_irq = advk_pcie_map_irq;

	ret = pci_host_probe(bridge);
	if (ret < 0) {
		advk_pcie_remove_rp_irq_domain(pcie);
		advk_pcie_remove_msi_irq_domain(pcie);
		advk_pcie_remove_irq_domain(pcie);
		return ret;
	}

	return 0;
}
static void advk_pcie_remove(struct platform_device *pdev)
{
	struct advk_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u32 val;
	int i;

	/* Remove PCI bus with all devices */
	pci_lock_rescan_remove();
	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	pci_unlock_rescan_remove();

	/* Disable Root Bridge I/O space, memory space and bus mastering */
	val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
	val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);

	/* Disable MSI */
	val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
	val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
	advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);

	/* Clear MSI address */
	advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
	advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);

	/* Mask all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);

	/* Clear all interrupts */
	advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
	advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
	advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
	advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);

	/* Remove IRQ domains */
	advk_pcie_remove_rp_irq_domain(pcie);
	advk_pcie_remove_msi_irq_domain(pcie);
	advk_pcie_remove_irq_domain(pcie);

	/* Free config space for emulated root bridge */
	pci_bridge_emul_cleanup(&pcie->bridge);

	/* Assert PERST# signal which prepares PCIe card for power down */
	if (pcie->reset_gpio)
		gpiod_set_value_cansleep(pcie->reset_gpio, 1);

	/* Disable link training */
	val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
	val &= ~LINK_TRAINING_EN;
	advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);

	/* Disable outbound address windows mapping */
	for (i = 0; i < OB_WIN_COUNT; i++)
		advk_pcie_disable_ob_win(pcie, i);

	/* Disable phy */
	advk_pcie_disable_phy(pcie);
}
static const struct of_device_id advk_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-3700-pcie", },
	{},
};
MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);

static struct platform_driver advk_pcie_driver = {
	.driver = {
		.name = "advk-pcie",
		.of_match_table = advk_pcie_of_match_table,
	},
	.probe = advk_pcie_probe,
	.remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);

MODULE_DESCRIPTION("Aardvark PCIe controller");
MODULE_LICENSE("GPL v2");