// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"
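
/*
 * Largest inbound window each root-port BAR can claim, and the width of
 * the aperture field programmed into CDNS_PCIE_LM_RC_BAR_CFG for each
 * BAR. RP_NO_BAR matches any address below 2^63.
 */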
static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};

static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};
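
/*
 * Map a configuration space access: accesses to the root port itself go
 * straight to the local registers, while accesses to devices behind the
 * root port are routed through outbound AXI region 0, whose PCI address
 * and descriptor are reprogrammed here for each bus/devfn.
 */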
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == bridge->busnr + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}
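
/*
 * With .map_bus above, the generic accessors can service both Type 0
 * and Type 1 configuration transactions.
 */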
static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};
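
/*
 * Program the root complex BAR configuration register and the root
 * port's config space header (optional vendor/device IDs and the
 * PCI-to-PCI bridge class code).
 */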
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}
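
/*
 * Program one inbound address translation window: the BAR match address
 * and aperture, plus, for the ordinary BARs, the matching control bits
 * in CDNS_PCIE_LM_RC_BAR_CFG. The BAR is consumed from avail_ib_bar[]
 * so it cannot be claimed twice.
 */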
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}
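
/*
 * Pick the smallest still-available BAR that is large enough for @size,
 * so that bigger BARs stay free for bigger dma-ranges entries.
 */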
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size <= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] < bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

static enum cdns_pcie_rp_bar
cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size >= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] > bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}
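
/*
 * Map one dma-ranges entry through the inbound BARs: prefer a single BAR
 * that covers the whole entry; otherwise repeatedly carve off the largest
 * window that still fits until the entry is fully mapped.
 */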
static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find a minimum BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less than
		 * the remaining resource_entry size.
		 * If a minimum BAR is found, IB ATU will be configured and
		 * exited.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If the control reaches here, it would mean the remaining
		 * resource_entry size cannot be fitted in a single BAR. So we
		 * find a maximum BAR whose size is less than or equal to the
		 * remaining resource_entry size and split the resource entry
		 * so that part of resource entry is fitted inside the maximum
		 * BAR. The remaining size would be fitted during the next
		 * iteration of the loop.
		 * If a maximum BAR is not found, there is no way we can fit
		 * this resource_entry, so we error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}
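
/*
 * list_sort() comparator: order dma-ranges entries by decreasing size so
 * the largest entries get first pick of the inbound BARs.
 */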
static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct resource_entry *entry1, *entry2;

	entry1 = container_of(a, struct resource_entry, node);
	entry2 = container_of(b, struct resource_entry, node);

	return resource_size(entry2->res) - resource_size(entry1->res);
}
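
/*
 * Program the inbound windows from the bridge's dma-ranges list. Without
 * dma-ranges, fall back to a single RP_NO_BAR window whose size comes
 * from the optional "cdns,no-bar-match-nbits" DT property (default 2^32).
 */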
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 no_bar_nbits = 32;
	int err;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	if (list_empty(&bridge->dma_ranges)) {
		of_property_read_u32(np, "cdns,no-bar-match-nbits",
				     &no_bar_nbits);
		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
						   (u64)1 << no_bar_nbits, 0);
		if (err)
			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
		return err;
	}

	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = cdns_pcie_host_bar_config(rc, entry);
		if (err) {
			dev_err(dev, "Fail to configure IB using dma-ranges\n");
			return err;
		}
	}

	return 0;
}
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource *cfg_res = rc->cfg_res;
	struct resource_entry *entry;
	u64 cpu_addr = cfg_res->start;
	u32 addr0, addr1, desc1;
	int r, busnr = 0;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (entry)
		busnr = entry->res->start;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;
		u64 pci_addr = res->start - entry->offset;

		if (resource_type(res) == IORESOURCE_IO)
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      true,
						      pci_pio_to_address(res->start),
						      pci_addr,
						      resource_size(res));
		else
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      false,
						      res->start,
						      pci_addr,
						      resource_size(res));

		r++;
	}

	return cdns_pcie_host_map_dma_ranges(rc);
}
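
/* Root port configuration first, then all address translation setup. */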
static int cdns_pcie_host_init(struct device *dev,
			       struct cdns_pcie_rc *rc)
{
	int err;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		return err;

	return cdns_pcie_host_init_address_translation(rc);
}

static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (cdns_pcie_link_up(pcie)) {
			dev_info(dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}
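
/*
 * Top-level host bringup: read the optional DT vendor/device IDs, map the
 * register and config-space resources, start the link (a link that never
 * comes up is not fatal), program the root port and address translation,
 * then hand the bridge to the PCI core via pci_host_probe().
 */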
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	struct device *dev = rc->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	enum cdns_pcie_rp_bar bar;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	rc->vendor_id = 0xffff;
	of_property_read_u32(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u32(np, "device-id", &rc->device_id);

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base))
		return PTR_ERR(rc->cfg_base);
	rc->cfg_res = res;

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_wait_for_link(pcie);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		rc->avail_ib_bar[bar] = true;

	ret = cdns_pcie_host_init(dev, rc);
	if (ret)
		return ret;

	if (!bridge->ops)
		bridge->ops = &cdns_pcie_host_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_init;

	return 0;

 err_init:
	pm_runtime_put_sync(dev);

	return ret;
}