// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"
/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
23 static u8
__dw_pcie_find_next_cap(struct dw_pcie
*pci
, u8 cap_ptr
,
26 u8 cap_id
, next_cap_ptr
;
32 reg
= dw_pcie_readw_dbi(pci
, cap_ptr
);
33 cap_id
= (reg
& 0x00ff);
35 if (cap_id
> PCI_CAP_ID_MAX
)
41 next_cap_ptr
= (reg
& 0xff00) >> 8;
42 return __dw_pcie_find_next_cap(pci
, next_cap_ptr
, cap
);
45 u8
dw_pcie_find_capability(struct dw_pcie
*pci
, u8 cap
)
50 reg
= dw_pcie_readw_dbi(pci
, PCI_CAPABILITY_LIST
);
51 next_cap_ptr
= (reg
& 0x00ff);
53 return __dw_pcie_find_next_cap(pci
, next_cap_ptr
, cap
);
55 EXPORT_SYMBOL_GPL(dw_pcie_find_capability
);
57 static u16
dw_pcie_find_next_ext_capability(struct dw_pcie
*pci
, u16 start
,
62 int pos
= PCI_CFG_SPACE_SIZE
;
64 /* minimum 8 bytes per capability */
65 ttl
= (PCI_CFG_SPACE_EXP_SIZE
- PCI_CFG_SPACE_SIZE
) / 8;
70 header
= dw_pcie_readl_dbi(pci
, pos
);
72 * If we have no capabilities, this is indicated by cap ID,
73 * cap version and next pointer all being 0.
79 if (PCI_EXT_CAP_ID(header
) == cap
&& pos
!= start
)
82 pos
= PCI_EXT_CAP_NEXT(header
);
83 if (pos
< PCI_CFG_SPACE_SIZE
)
86 header
= dw_pcie_readl_dbi(pci
, pos
);
92 u16
dw_pcie_find_ext_capability(struct dw_pcie
*pci
, u8 cap
)
94 return dw_pcie_find_next_ext_capability(pci
, 0, cap
);
96 EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability
);
98 int dw_pcie_read(void __iomem
*addr
, int size
, u32
*val
)
100 if (!IS_ALIGNED((uintptr_t)addr
, size
)) {
102 return PCIBIOS_BAD_REGISTER_NUMBER
;
107 } else if (size
== 2) {
109 } else if (size
== 1) {
113 return PCIBIOS_BAD_REGISTER_NUMBER
;
116 return PCIBIOS_SUCCESSFUL
;
118 EXPORT_SYMBOL_GPL(dw_pcie_read
);
120 int dw_pcie_write(void __iomem
*addr
, int size
, u32 val
)
122 if (!IS_ALIGNED((uintptr_t)addr
, size
))
123 return PCIBIOS_BAD_REGISTER_NUMBER
;
132 return PCIBIOS_BAD_REGISTER_NUMBER
;
134 return PCIBIOS_SUCCESSFUL
;
136 EXPORT_SYMBOL_GPL(dw_pcie_write
);
138 u32
dw_pcie_read_dbi(struct dw_pcie
*pci
, u32 reg
, size_t size
)
143 if (pci
->ops
->read_dbi
)
144 return pci
->ops
->read_dbi(pci
, pci
->dbi_base
, reg
, size
);
146 ret
= dw_pcie_read(pci
->dbi_base
+ reg
, size
, &val
);
148 dev_err(pci
->dev
, "Read DBI address failed\n");
152 EXPORT_SYMBOL_GPL(dw_pcie_read_dbi
);
154 void dw_pcie_write_dbi(struct dw_pcie
*pci
, u32 reg
, size_t size
, u32 val
)
158 if (pci
->ops
->write_dbi
) {
159 pci
->ops
->write_dbi(pci
, pci
->dbi_base
, reg
, size
, val
);
163 ret
= dw_pcie_write(pci
->dbi_base
+ reg
, size
, val
);
165 dev_err(pci
->dev
, "Write DBI address failed\n");
167 EXPORT_SYMBOL_GPL(dw_pcie_write_dbi
);
169 u32
dw_pcie_read_dbi2(struct dw_pcie
*pci
, u32 reg
, size_t size
)
174 if (pci
->ops
->read_dbi2
)
175 return pci
->ops
->read_dbi2(pci
, pci
->dbi_base2
, reg
, size
);
177 ret
= dw_pcie_read(pci
->dbi_base2
+ reg
, size
, &val
);
179 dev_err(pci
->dev
, "read DBI address failed\n");
184 void dw_pcie_write_dbi2(struct dw_pcie
*pci
, u32 reg
, size_t size
, u32 val
)
188 if (pci
->ops
->write_dbi2
) {
189 pci
->ops
->write_dbi2(pci
, pci
->dbi_base2
, reg
, size
, val
);
193 ret
= dw_pcie_write(pci
->dbi_base2
+ reg
, size
, val
);
195 dev_err(pci
->dev
, "write DBI address failed\n");
198 u32
dw_pcie_read_atu(struct dw_pcie
*pci
, u32 reg
, size_t size
)
203 if (pci
->ops
->read_dbi
)
204 return pci
->ops
->read_dbi(pci
, pci
->atu_base
, reg
, size
);
206 ret
= dw_pcie_read(pci
->atu_base
+ reg
, size
, &val
);
208 dev_err(pci
->dev
, "Read ATU address failed\n");
213 void dw_pcie_write_atu(struct dw_pcie
*pci
, u32 reg
, size_t size
, u32 val
)
217 if (pci
->ops
->write_dbi
) {
218 pci
->ops
->write_dbi(pci
, pci
->atu_base
, reg
, size
, val
);
222 ret
= dw_pcie_write(pci
->atu_base
+ reg
, size
, val
);
224 dev_err(pci
->dev
, "Write ATU address failed\n");
227 static u32
dw_pcie_readl_ob_unroll(struct dw_pcie
*pci
, u32 index
, u32 reg
)
229 u32 offset
= PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index
);
231 return dw_pcie_readl_atu(pci
, offset
+ reg
);
234 static void dw_pcie_writel_ob_unroll(struct dw_pcie
*pci
, u32 index
, u32 reg
,
237 u32 offset
= PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index
);
239 dw_pcie_writel_atu(pci
, offset
+ reg
, val
);
242 static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie
*pci
, int index
,
243 int type
, u64 cpu_addr
,
244 u64 pci_addr
, u32 size
)
248 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_LOWER_BASE
,
249 lower_32_bits(cpu_addr
));
250 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_UPPER_BASE
,
251 upper_32_bits(cpu_addr
));
252 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_LIMIT
,
253 lower_32_bits(cpu_addr
+ size
- 1));
254 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_LOWER_TARGET
,
255 lower_32_bits(pci_addr
));
256 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_UPPER_TARGET
,
257 upper_32_bits(pci_addr
));
258 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_REGION_CTRL1
,
260 dw_pcie_writel_ob_unroll(pci
, index
, PCIE_ATU_UNR_REGION_CTRL2
,
264 * Make sure ATU enable takes effect before any subsequent config
267 for (retries
= 0; retries
< LINK_WAIT_MAX_IATU_RETRIES
; retries
++) {
268 val
= dw_pcie_readl_ob_unroll(pci
, index
,
269 PCIE_ATU_UNR_REGION_CTRL2
);
270 if (val
& PCIE_ATU_ENABLE
)
273 mdelay(LINK_WAIT_IATU
);
275 dev_err(pci
->dev
, "Outbound iATU is not being enabled\n");
278 void dw_pcie_prog_outbound_atu(struct dw_pcie
*pci
, int index
, int type
,
279 u64 cpu_addr
, u64 pci_addr
, u32 size
)
283 if (pci
->ops
->cpu_addr_fixup
)
284 cpu_addr
= pci
->ops
->cpu_addr_fixup(pci
, cpu_addr
);
286 if (pci
->iatu_unroll_enabled
) {
287 dw_pcie_prog_outbound_atu_unroll(pci
, index
, type
, cpu_addr
,
292 dw_pcie_writel_dbi(pci
, PCIE_ATU_VIEWPORT
,
293 PCIE_ATU_REGION_OUTBOUND
| index
);
294 dw_pcie_writel_dbi(pci
, PCIE_ATU_LOWER_BASE
,
295 lower_32_bits(cpu_addr
));
296 dw_pcie_writel_dbi(pci
, PCIE_ATU_UPPER_BASE
,
297 upper_32_bits(cpu_addr
));
298 dw_pcie_writel_dbi(pci
, PCIE_ATU_LIMIT
,
299 lower_32_bits(cpu_addr
+ size
- 1));
300 dw_pcie_writel_dbi(pci
, PCIE_ATU_LOWER_TARGET
,
301 lower_32_bits(pci_addr
));
302 dw_pcie_writel_dbi(pci
, PCIE_ATU_UPPER_TARGET
,
303 upper_32_bits(pci_addr
));
304 dw_pcie_writel_dbi(pci
, PCIE_ATU_CR1
, type
);
305 dw_pcie_writel_dbi(pci
, PCIE_ATU_CR2
, PCIE_ATU_ENABLE
);
308 * Make sure ATU enable takes effect before any subsequent config
311 for (retries
= 0; retries
< LINK_WAIT_MAX_IATU_RETRIES
; retries
++) {
312 val
= dw_pcie_readl_dbi(pci
, PCIE_ATU_CR2
);
313 if (val
& PCIE_ATU_ENABLE
)
316 mdelay(LINK_WAIT_IATU
);
318 dev_err(pci
->dev
, "Outbound iATU is not being enabled\n");
321 static u32
dw_pcie_readl_ib_unroll(struct dw_pcie
*pci
, u32 index
, u32 reg
)
323 u32 offset
= PCIE_GET_ATU_INB_UNR_REG_OFFSET(index
);
325 return dw_pcie_readl_atu(pci
, offset
+ reg
);
328 static void dw_pcie_writel_ib_unroll(struct dw_pcie
*pci
, u32 index
, u32 reg
,
331 u32 offset
= PCIE_GET_ATU_INB_UNR_REG_OFFSET(index
);
333 dw_pcie_writel_atu(pci
, offset
+ reg
, val
);
336 static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie
*pci
, int index
,
337 int bar
, u64 cpu_addr
,
338 enum dw_pcie_as_type as_type
)
343 dw_pcie_writel_ib_unroll(pci
, index
, PCIE_ATU_UNR_LOWER_TARGET
,
344 lower_32_bits(cpu_addr
));
345 dw_pcie_writel_ib_unroll(pci
, index
, PCIE_ATU_UNR_UPPER_TARGET
,
346 upper_32_bits(cpu_addr
));
350 type
= PCIE_ATU_TYPE_MEM
;
353 type
= PCIE_ATU_TYPE_IO
;
359 dw_pcie_writel_ib_unroll(pci
, index
, PCIE_ATU_UNR_REGION_CTRL1
, type
);
360 dw_pcie_writel_ib_unroll(pci
, index
, PCIE_ATU_UNR_REGION_CTRL2
,
362 PCIE_ATU_BAR_MODE_ENABLE
| (bar
<< 8));
365 * Make sure ATU enable takes effect before any subsequent config
368 for (retries
= 0; retries
< LINK_WAIT_MAX_IATU_RETRIES
; retries
++) {
369 val
= dw_pcie_readl_ib_unroll(pci
, index
,
370 PCIE_ATU_UNR_REGION_CTRL2
);
371 if (val
& PCIE_ATU_ENABLE
)
374 mdelay(LINK_WAIT_IATU
);
376 dev_err(pci
->dev
, "Inbound iATU is not being enabled\n");
381 int dw_pcie_prog_inbound_atu(struct dw_pcie
*pci
, int index
, int bar
,
382 u64 cpu_addr
, enum dw_pcie_as_type as_type
)
387 if (pci
->iatu_unroll_enabled
)
388 return dw_pcie_prog_inbound_atu_unroll(pci
, index
, bar
,
391 dw_pcie_writel_dbi(pci
, PCIE_ATU_VIEWPORT
, PCIE_ATU_REGION_INBOUND
|
393 dw_pcie_writel_dbi(pci
, PCIE_ATU_LOWER_TARGET
, lower_32_bits(cpu_addr
));
394 dw_pcie_writel_dbi(pci
, PCIE_ATU_UPPER_TARGET
, upper_32_bits(cpu_addr
));
398 type
= PCIE_ATU_TYPE_MEM
;
401 type
= PCIE_ATU_TYPE_IO
;
407 dw_pcie_writel_dbi(pci
, PCIE_ATU_CR1
, type
);
408 dw_pcie_writel_dbi(pci
, PCIE_ATU_CR2
, PCIE_ATU_ENABLE
409 | PCIE_ATU_BAR_MODE_ENABLE
| (bar
<< 8));
412 * Make sure ATU enable takes effect before any subsequent config
415 for (retries
= 0; retries
< LINK_WAIT_MAX_IATU_RETRIES
; retries
++) {
416 val
= dw_pcie_readl_dbi(pci
, PCIE_ATU_CR2
);
417 if (val
& PCIE_ATU_ENABLE
)
420 mdelay(LINK_WAIT_IATU
);
422 dev_err(pci
->dev
, "Inbound iATU is not being enabled\n");
427 void dw_pcie_disable_atu(struct dw_pcie
*pci
, int index
,
428 enum dw_pcie_region_type type
)
433 case DW_PCIE_REGION_INBOUND
:
434 region
= PCIE_ATU_REGION_INBOUND
;
436 case DW_PCIE_REGION_OUTBOUND
:
437 region
= PCIE_ATU_REGION_OUTBOUND
;
443 dw_pcie_writel_dbi(pci
, PCIE_ATU_VIEWPORT
, region
| index
);
444 dw_pcie_writel_dbi(pci
, PCIE_ATU_CR2
, (u32
)~PCIE_ATU_ENABLE
);
447 int dw_pcie_wait_for_link(struct dw_pcie
*pci
)
451 /* Check if the link is up or not */
452 for (retries
= 0; retries
< LINK_WAIT_MAX_RETRIES
; retries
++) {
453 if (dw_pcie_link_up(pci
)) {
454 dev_info(pci
->dev
, "Link up\n");
457 usleep_range(LINK_WAIT_USLEEP_MIN
, LINK_WAIT_USLEEP_MAX
);
460 dev_info(pci
->dev
, "Phy link never came up\n");
464 EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link
);
466 int dw_pcie_link_up(struct dw_pcie
*pci
)
470 if (pci
->ops
->link_up
)
471 return pci
->ops
->link_up(pci
);
473 val
= readl(pci
->dbi_base
+ PCIE_PORT_DEBUG1
);
474 return ((val
& PCIE_PORT_DEBUG1_LINK_UP
) &&
475 (!(val
& PCIE_PORT_DEBUG1_LINK_IN_TRAINING
)));
478 void dw_pcie_upconfig_setup(struct dw_pcie
*pci
)
482 val
= dw_pcie_readl_dbi(pci
, PCIE_PORT_MULTI_LANE_CTRL
);
483 val
|= PORT_MLTI_UPCFG_SUPPORT
;
484 dw_pcie_writel_dbi(pci
, PCIE_PORT_MULTI_LANE_CTRL
, val
);
486 EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup
);
488 void dw_pcie_link_set_max_speed(struct dw_pcie
*pci
, u32 link_gen
)
491 u8 offset
= dw_pcie_find_capability(pci
, PCI_CAP_ID_EXP
);
493 reg
= dw_pcie_readl_dbi(pci
, offset
+ PCI_EXP_LNKCTL2
);
494 reg
&= ~PCI_EXP_LNKCTL2_TLS
;
496 switch (pcie_link_speed
[link_gen
]) {
497 case PCIE_SPEED_2_5GT
:
498 reg
|= PCI_EXP_LNKCTL2_TLS_2_5GT
;
500 case PCIE_SPEED_5_0GT
:
501 reg
|= PCI_EXP_LNKCTL2_TLS_5_0GT
;
503 case PCIE_SPEED_8_0GT
:
504 reg
|= PCI_EXP_LNKCTL2_TLS_8_0GT
;
506 case PCIE_SPEED_16_0GT
:
507 reg
|= PCI_EXP_LNKCTL2_TLS_16_0GT
;
510 /* Use hardware capability */
511 val
= dw_pcie_readl_dbi(pci
, offset
+ PCI_EXP_LNKCAP
);
512 val
= FIELD_GET(PCI_EXP_LNKCAP_SLS
, val
);
513 reg
&= ~PCI_EXP_LNKCTL2_HASD
;
514 reg
|= FIELD_PREP(PCI_EXP_LNKCTL2_TLS
, val
);
518 dw_pcie_writel_dbi(pci
, offset
+ PCI_EXP_LNKCTL2
, reg
);
520 EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed
);
522 void dw_pcie_link_set_n_fts(struct dw_pcie
*pci
, u32 n_fts
)
526 val
= dw_pcie_readl_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
);
527 val
&= ~PORT_LOGIC_N_FTS_MASK
;
528 val
|= n_fts
& PORT_LOGIC_N_FTS_MASK
;
529 dw_pcie_writel_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
, val
);
531 EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts
);
533 static u8
dw_pcie_iatu_unroll_enabled(struct dw_pcie
*pci
)
537 val
= dw_pcie_readl_dbi(pci
, PCIE_ATU_VIEWPORT
);
538 if (val
== 0xffffffff)
544 void dw_pcie_setup(struct dw_pcie
*pci
)
549 struct device
*dev
= pci
->dev
;
550 struct device_node
*np
= dev
->of_node
;
552 if (pci
->version
>= 0x480A || (!pci
->version
&&
553 dw_pcie_iatu_unroll_enabled(pci
))) {
554 pci
->iatu_unroll_enabled
= true;
556 pci
->atu_base
= pci
->dbi_base
+ DEFAULT_DBI_ATU_OFFSET
;
558 dev_dbg(pci
->dev
, "iATU unroll: %s\n", pci
->iatu_unroll_enabled
?
559 "enabled" : "disabled");
562 ret
= of_property_read_u32(np
, "num-lanes", &lanes
);
564 dev_dbg(pci
->dev
, "property num-lanes isn't found\n");
568 /* Set the number of lanes */
569 val
= dw_pcie_readl_dbi(pci
, PCIE_PORT_LINK_CONTROL
);
570 val
&= ~PORT_LINK_MODE_MASK
;
573 val
|= PORT_LINK_MODE_1_LANES
;
576 val
|= PORT_LINK_MODE_2_LANES
;
579 val
|= PORT_LINK_MODE_4_LANES
;
582 val
|= PORT_LINK_MODE_8_LANES
;
585 dev_err(pci
->dev
, "num-lanes %u: invalid value\n", lanes
);
588 dw_pcie_writel_dbi(pci
, PCIE_PORT_LINK_CONTROL
, val
);
590 /* Set link width speed control register */
591 val
= dw_pcie_readl_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
);
592 val
&= ~PORT_LOGIC_LINK_WIDTH_MASK
;
595 val
|= PORT_LOGIC_LINK_WIDTH_1_LANES
;
598 val
|= PORT_LOGIC_LINK_WIDTH_2_LANES
;
601 val
|= PORT_LOGIC_LINK_WIDTH_4_LANES
;
604 val
|= PORT_LOGIC_LINK_WIDTH_8_LANES
;
607 dw_pcie_writel_dbi(pci
, PCIE_LINK_WIDTH_SPEED_CONTROL
, val
);
609 if (of_property_read_bool(np
, "snps,enable-cdm-check")) {
610 val
= dw_pcie_readl_dbi(pci
, PCIE_PL_CHK_REG_CONTROL_STATUS
);
611 val
|= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS
|
612 PCIE_PL_CHK_REG_CHK_REG_START
;
613 dw_pcie_writel_dbi(pci
, PCIE_PL_CHK_REG_CONTROL_STATUS
, val
);