// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/delay.h>
#include <linux/of.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
                                  u8 cap)
{
        u8 cap_id, next_cap_ptr;
        u16 reg;

        if (!cap_ptr)
                return 0;

        reg = dw_pcie_readw_dbi(pci, cap_ptr);
        cap_id = (reg & 0x00ff);

        if (cap_id > PCI_CAP_ID_MAX)
                return 0;

        if (cap_id == cap)
                return cap_ptr;

        next_cap_ptr = (reg & 0xff00) >> 8;
        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}

u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
        u8 next_cap_ptr;
        u16 reg;

        reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
        next_cap_ptr = (reg & 0x00ff);

        return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
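
/*
 * Extended capabilities live in the extended config space (offsets 0x100
 * and up).  Each 32-bit header packs a 16-bit capability ID, a 4-bit
 * version and a 12-bit next pointer; the ttl bound below limits the walk
 * to the maximum number of 8-byte capabilities that fit in the space, so
 * a corrupted next pointer cannot loop forever.
 */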
static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
                                            u8 cap)
{
        u32 header;
        int ttl;
        int pos = PCI_CFG_SPACE_SIZE;

        /* minimum 8 bytes per capability */
        ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

        if (start)
                pos = start;

        header = dw_pcie_readl_dbi(pci, pos);
        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap && pos != start)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        break;

                header = dw_pcie_readl_dbi(pci, pos);
        }

        return 0;
}

u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
        return dw_pcie_find_next_ext_capability(pci, 0, cap);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
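
/*
 * Size-dispatching MMIO helpers shared by the DBI and ATU accessors
 * below.  Accesses must be naturally aligned and 1, 2 or 4 bytes wide;
 * anything else is rejected with PCIBIOS_BAD_REGISTER_NUMBER instead of
 * issuing a misaligned or unsupported access.
 */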
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size)) {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        if (size == 4) {
                *val = readl(addr);
        } else if (size == 2) {
                *val = readw(addr);
        } else if (size == 1) {
                *val = readb(addr);
        } else {
                *val = 0;
                return PCIBIOS_BAD_REGISTER_NUMBER;
        }

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);

int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
        if (!IS_ALIGNED((uintptr_t)addr, size))
                return PCIBIOS_BAD_REGISTER_NUMBER;

        if (size == 4)
                writel(val, addr);
        else if (size == 2)
                writew(val, addr);
        else if (size == 1)
                writeb(val, addr);
        else
                return PCIBIOS_BAD_REGISTER_NUMBER;

        return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);
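
/*
 * DBI (data bus interface) accessors.  Platforms with special access
 * requirements can override the raw register access through
 * pci->ops->read_dbi/write_dbi; otherwise the register block mapped at
 * pci->dbi_base (or dbi_base2 for the shadow registers) is accessed
 * directly.
 */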
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
        int ret;
        u32 val;

        if (pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

        ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
        if (ret)
                dev_err(pci->dev, "Read DBI address failed\n");

        return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);

void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);

u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size)
{
        int ret;
        u32 val;

        if (pci->ops->read_dbi2)
                return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size);

        ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val);
        if (ret)
                dev_err(pci->dev, "read DBI address failed\n");

        return val;
}

void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops->write_dbi2) {
                pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
        if (ret)
                dev_err(pci->dev, "write DBI address failed\n");
}
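
/*
 * ATU register accessors.  Note there are no dedicated ATU ops hooks:
 * the same read_dbi/write_dbi overrides are reused, only the base
 * address differs, pointing at the unrolled iATU register space at
 * pci->atu_base.
 */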
u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size)
{
        int ret;
        u32 val;

        if (pci->ops->read_dbi)
                return pci->ops->read_dbi(pci, pci->atu_base, reg, size);

        ret = dw_pcie_read(pci->atu_base + reg, size, &val);
        if (ret)
                dev_err(pci->dev, "Read ATU address failed\n");

        return val;
}

void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
        int ret;

        if (pci->ops->write_dbi) {
                pci->ops->write_dbi(pci, pci->atu_base, reg, size, val);
                return;
        }

        ret = dw_pcie_write(pci->atu_base + reg, size, val);
        if (ret)
                dev_err(pci->dev, "Write ATU address failed\n");
}

static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
        u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

        return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
                                     u32 val)
{
        u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

        dw_pcie_writel_atu(pci, offset + reg, val);
}
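
/*
 * Program one outbound iATU region in "unroll" mode, where every region
 * has its own register block instead of being selected through the
 * shared viewport.  The region maps CPU addresses cpu_addr..cpu_addr +
 * size - 1 to PCI addresses starting at pci_addr and tags outgoing TLPs
 * with the given type (e.g. MEM, IO or CFG0/CFG1).
 */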
static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
                                             int type, u64 cpu_addr,
                                             u64 pci_addr, u32 size)
{
        u32 retries, val;

        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
                                 lower_32_bits(cpu_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
                                 upper_32_bits(cpu_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
                                 lower_32_bits(cpu_addr + size - 1));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
                                 lower_32_bits(pci_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
                                 upper_32_bits(pci_addr));
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
                                 type);
        dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
                                 PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_ob_unroll(pci, index,
                                              PCIE_ATU_UNR_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
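
/*
 * Viewport-mode variant for older cores: the region index is first
 * selected through PCIE_ATU_VIEWPORT, after which one shared set of
 * base/limit/target/CR1/CR2 registers is programmed for whichever
 * region the viewport currently points at.
 */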
void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                               u64 cpu_addr, u64 pci_addr, u32 size)
{
        u32 retries, val;

        if (pci->ops->cpu_addr_fixup)
                cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

        if (pci->iatu_unroll_enabled) {
                dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
                                                 pci_addr, size);
                return;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
                           PCIE_ATU_REGION_OUTBOUND | index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
                           lower_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
                           upper_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
                           lower_32_bits(cpu_addr + size - 1));
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
                           lower_32_bits(pci_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
                           upper_32_bits(pci_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
                if (val & PCIE_ATU_ENABLE)
                        return;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
        u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

        return dw_pcie_readl_atu(pci, offset + reg);
}

static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
                                     u32 val)
{
        u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

        dw_pcie_writel_atu(pci, offset + reg, val);
}
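
/*
 * Program one inbound iATU region in BAR-match mode: inbound TLPs that
 * hit the given BAR are translated to cpu_addr, with the region type
 * (memory or I/O) derived from as_type.
 */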
static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
                                           int bar, u64 cpu_addr,
                                           enum dw_pcie_as_type as_type)
{
        int type;
        u32 retries, val;

        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
                                 lower_32_bits(cpu_addr));
        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
                                 upper_32_bits(cpu_addr));

        switch (as_type) {
        case DW_PCIE_AS_MEM:
                type = PCIE_ATU_TYPE_MEM;
                break;
        case DW_PCIE_AS_IO:
                type = PCIE_ATU_TYPE_IO;
                break;
        default:
                return -EINVAL;
        }

        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
        dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
                                 PCIE_ATU_ENABLE |
                                 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_ib_unroll(pci, index,
                                              PCIE_ATU_UNR_REGION_CTRL2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -EBUSY;
}

int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
                             u64 cpu_addr, enum dw_pcie_as_type as_type)
{
        int type;
        u32 retries, val;

        if (pci->iatu_unroll_enabled)
                return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
                                                       cpu_addr, as_type);

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
                           index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
        dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

        switch (as_type) {
        case DW_PCIE_AS_MEM:
                type = PCIE_ATU_TYPE_MEM;
                break;
        case DW_PCIE_AS_IO:
                type = PCIE_ATU_TYPE_IO;
                break;
        default:
                return -EINVAL;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
                           | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

        /*
         * Make sure ATU enable takes effect before any subsequent config
         * and I/O accesses.
         */
        for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
                val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
                if (val & PCIE_ATU_ENABLE)
                        return 0;

                mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");

        return -EBUSY;
}

void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
                         enum dw_pcie_region_type type)
{
        int region;

        switch (type) {
        case DW_PCIE_REGION_INBOUND:
                region = PCIE_ATU_REGION_INBOUND;
                break;
        case DW_PCIE_REGION_OUTBOUND:
                region = PCIE_ATU_REGION_OUTBOUND;
                break;
        default:
                return;
        }

        dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
        dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
}

int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
        int retries;

        /* Check if the link is up or not */
        for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
                if (dw_pcie_link_up(pci)) {
                        dev_info(pci->dev, "Link up\n");
                        return 0;
                }
                usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
        }

        dev_info(pci->dev, "Phy link never came up\n");

        return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
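
/*
 * The link counts as up only when PORT_DEBUG1 reports it up and no
 * longer in training; platforms with their own link-status register can
 * override this check via pci->ops->link_up.
 */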
int dw_pcie_link_up(struct dw_pcie *pci)
{
        u32 val;

        if (pci->ops->link_up)
                return pci->ops->link_up(pci);

        val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
        return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
                (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}

void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
        val |= PORT_MLTI_UPCFG_SUPPORT;
        dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
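
/*
 * Cap the target link speed by rewriting the Target Link Speed field of
 * Link Control 2.  A link_gen outside Gen1-Gen4 falls through to the
 * default case, which re-arms the field from the maximum speed
 * advertised in Link Capabilities instead.
 */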
void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
{
        u32 reg, val;
        u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

        reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
        reg &= ~PCI_EXP_LNKCTL2_TLS;

        switch (pcie_link_speed[link_gen]) {
        case PCIE_SPEED_2_5GT:
                reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
                break;
        case PCIE_SPEED_5_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
                break;
        case PCIE_SPEED_8_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
                break;
        case PCIE_SPEED_16_0GT:
                reg |= PCI_EXP_LNKCTL2_TLS_16_0GT;
                break;
        default:
                /* Use hardware capability */
                val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
                val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val);
                reg &= ~PCI_EXP_LNKCTL2_HASD;
                reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val);
                break;
        }

        dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed);

void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_N_FTS_MASK;
        val |= n_fts & PORT_LOGIC_N_FTS_MASK;
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
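
/*
 * Detect iATU "unroll" mode at run time: cores that dropped the legacy
 * viewport read back all-ones from the PCIE_ATU_VIEWPORT offset, since
 * the register no longer exists there.
 */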
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
        u32 val;

        val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
        if (val == 0xffffffff)
                return 1;

        return 0;
}
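
/*
 * One-time core setup: latch iATU unroll mode (and the default unrolled
 * ATU base), then program the lane count and link width from the DT
 * "num-lanes" property, and optionally start the continuous CDM register
 * integrity check when "snps,enable-cdm-check" is present.
 */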
void dw_pcie_setup(struct dw_pcie *pci)
{
        int ret;
        u32 val;
        u32 lanes;
        struct device *dev = pci->dev;
        struct device_node *np = dev->of_node;

        if (pci->version >= 0x480A || (!pci->version &&
                                       dw_pcie_iatu_unroll_enabled(pci))) {
                pci->iatu_unroll_enabled = true;
                if (!pci->atu_base)
                        pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
        }
        dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
                "enabled" : "disabled");

        ret = of_property_read_u32(np, "num-lanes", &lanes);
        if (ret) {
                dev_dbg(pci->dev, "property num-lanes isn't found\n");
                return;
        }

        /* Set the number of lanes */
        val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_MODE_MASK;
        switch (lanes) {
        case 1:
                val |= PORT_LINK_MODE_1_LANES;
                break;
        case 2:
                val |= PORT_LINK_MODE_2_LANES;
                break;
        case 4:
                val |= PORT_LINK_MODE_4_LANES;
                break;
        case 8:
                val |= PORT_LINK_MODE_8_LANES;
                break;
        default:
                dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
                return;
        }
        dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

        /* Set link width speed control register */
        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
        val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
        switch (lanes) {
        case 1:
                val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
                break;
        case 2:
                val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
                break;
        case 4:
                val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
                break;
        case 8:
                val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
                break;
        }
        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

        if (of_property_read_bool(np, "snps,enable-cdm-check")) {
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
                val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
                       PCIE_PL_CHK_REG_CHK_REG_START;
                dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
        }
}