// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
10 #include <linux/platform_device.h>
12 #include "pcie-designware.h"
13 #include <linux/pci-epc.h>
14 #include <linux/pci-epf.h>
16 #include "../../pci.h"
18 void dw_pcie_ep_linkup(struct dw_pcie_ep
*ep
)
20 struct pci_epc
*epc
= ep
->epc
;
24 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup
);
26 void dw_pcie_ep_init_notify(struct dw_pcie_ep
*ep
)
28 struct pci_epc
*epc
= ep
->epc
;
30 pci_epc_init_notify(epc
);
32 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify
);
34 struct dw_pcie_ep_func
*
35 dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep
*ep
, u8 func_no
)
37 struct dw_pcie_ep_func
*ep_func
;
39 list_for_each_entry(ep_func
, &ep
->func_list
, list
) {
40 if (ep_func
->func_no
== func_no
)
47 static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep
*ep
, u8 func_no
)
49 unsigned int func_offset
= 0;
51 if (ep
->ops
->func_conf_select
)
52 func_offset
= ep
->ops
->func_conf_select(ep
, func_no
);
57 static void __dw_pcie_ep_reset_bar(struct dw_pcie
*pci
, u8 func_no
,
58 enum pci_barno bar
, int flags
)
61 unsigned int func_offset
= 0;
62 struct dw_pcie_ep
*ep
= &pci
->ep
;
64 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
66 reg
= func_offset
+ PCI_BASE_ADDRESS_0
+ (4 * bar
);
67 dw_pcie_dbi_ro_wr_en(pci
);
68 dw_pcie_writel_dbi2(pci
, reg
, 0x0);
69 dw_pcie_writel_dbi(pci
, reg
, 0x0);
70 if (flags
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
71 dw_pcie_writel_dbi2(pci
, reg
+ 4, 0x0);
72 dw_pcie_writel_dbi(pci
, reg
+ 4, 0x0);
74 dw_pcie_dbi_ro_wr_dis(pci
);
77 void dw_pcie_ep_reset_bar(struct dw_pcie
*pci
, enum pci_barno bar
)
81 funcs
= pci
->ep
.epc
->max_functions
;
83 for (func_no
= 0; func_no
< funcs
; func_no
++)
84 __dw_pcie_ep_reset_bar(pci
, func_no
, bar
, 0);
87 static u8
__dw_pcie_ep_find_next_cap(struct dw_pcie_ep
*ep
, u8 func_no
,
90 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
91 unsigned int func_offset
= 0;
92 u8 cap_id
, next_cap_ptr
;
98 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
100 reg
= dw_pcie_readw_dbi(pci
, func_offset
+ cap_ptr
);
101 cap_id
= (reg
& 0x00ff);
103 if (cap_id
> PCI_CAP_ID_MAX
)
109 next_cap_ptr
= (reg
& 0xff00) >> 8;
110 return __dw_pcie_ep_find_next_cap(ep
, func_no
, next_cap_ptr
, cap
);
113 static u8
dw_pcie_ep_find_capability(struct dw_pcie_ep
*ep
, u8 func_no
, u8 cap
)
115 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
116 unsigned int func_offset
= 0;
120 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
122 reg
= dw_pcie_readw_dbi(pci
, func_offset
+ PCI_CAPABILITY_LIST
);
123 next_cap_ptr
= (reg
& 0x00ff);
125 return __dw_pcie_ep_find_next_cap(ep
, func_no
, next_cap_ptr
, cap
);
128 static int dw_pcie_ep_write_header(struct pci_epc
*epc
, u8 func_no
,
129 struct pci_epf_header
*hdr
)
131 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
132 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
133 unsigned int func_offset
= 0;
135 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
137 dw_pcie_dbi_ro_wr_en(pci
);
138 dw_pcie_writew_dbi(pci
, func_offset
+ PCI_VENDOR_ID
, hdr
->vendorid
);
139 dw_pcie_writew_dbi(pci
, func_offset
+ PCI_DEVICE_ID
, hdr
->deviceid
);
140 dw_pcie_writeb_dbi(pci
, func_offset
+ PCI_REVISION_ID
, hdr
->revid
);
141 dw_pcie_writeb_dbi(pci
, func_offset
+ PCI_CLASS_PROG
, hdr
->progif_code
);
142 dw_pcie_writew_dbi(pci
, func_offset
+ PCI_CLASS_DEVICE
,
143 hdr
->subclass_code
| hdr
->baseclass_code
<< 8);
144 dw_pcie_writeb_dbi(pci
, func_offset
+ PCI_CACHE_LINE_SIZE
,
145 hdr
->cache_line_size
);
146 dw_pcie_writew_dbi(pci
, func_offset
+ PCI_SUBSYSTEM_VENDOR_ID
,
147 hdr
->subsys_vendor_id
);
148 dw_pcie_writew_dbi(pci
, func_offset
+ PCI_SUBSYSTEM_ID
, hdr
->subsys_id
);
149 dw_pcie_writeb_dbi(pci
, func_offset
+ PCI_INTERRUPT_PIN
,
151 dw_pcie_dbi_ro_wr_dis(pci
);
156 static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep
*ep
, u8 func_no
,
157 enum pci_barno bar
, dma_addr_t cpu_addr
,
158 enum dw_pcie_as_type as_type
)
162 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
164 free_win
= find_first_zero_bit(ep
->ib_window_map
, pci
->num_ib_windows
);
165 if (free_win
>= pci
->num_ib_windows
) {
166 dev_err(pci
->dev
, "No free inbound window\n");
170 ret
= dw_pcie_prog_inbound_atu(pci
, func_no
, free_win
, bar
, cpu_addr
,
173 dev_err(pci
->dev
, "Failed to program IB window\n");
177 ep
->bar_to_atu
[bar
] = free_win
;
178 set_bit(free_win
, ep
->ib_window_map
);
183 static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep
*ep
, u8 func_no
,
184 phys_addr_t phys_addr
,
185 u64 pci_addr
, size_t size
)
188 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
190 free_win
= find_first_zero_bit(ep
->ob_window_map
, pci
->num_ob_windows
);
191 if (free_win
>= pci
->num_ob_windows
) {
192 dev_err(pci
->dev
, "No free outbound window\n");
196 dw_pcie_prog_ep_outbound_atu(pci
, func_no
, free_win
, PCIE_ATU_TYPE_MEM
,
197 phys_addr
, pci_addr
, size
);
199 set_bit(free_win
, ep
->ob_window_map
);
200 ep
->outbound_addr
[free_win
] = phys_addr
;
205 static void dw_pcie_ep_clear_bar(struct pci_epc
*epc
, u8 func_no
,
206 struct pci_epf_bar
*epf_bar
)
208 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
209 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
210 enum pci_barno bar
= epf_bar
->barno
;
211 u32 atu_index
= ep
->bar_to_atu
[bar
];
213 __dw_pcie_ep_reset_bar(pci
, func_no
, bar
, epf_bar
->flags
);
215 dw_pcie_disable_atu(pci
, atu_index
, DW_PCIE_REGION_INBOUND
);
216 clear_bit(atu_index
, ep
->ib_window_map
);
217 ep
->epf_bar
[bar
] = NULL
;
220 static int dw_pcie_ep_set_bar(struct pci_epc
*epc
, u8 func_no
,
221 struct pci_epf_bar
*epf_bar
)
224 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
225 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
226 enum pci_barno bar
= epf_bar
->barno
;
227 size_t size
= epf_bar
->size
;
228 int flags
= epf_bar
->flags
;
229 enum dw_pcie_as_type as_type
;
231 unsigned int func_offset
= 0;
233 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
235 reg
= PCI_BASE_ADDRESS_0
+ (4 * bar
) + func_offset
;
237 if (!(flags
& PCI_BASE_ADDRESS_SPACE
))
238 as_type
= DW_PCIE_AS_MEM
;
240 as_type
= DW_PCIE_AS_IO
;
242 ret
= dw_pcie_ep_inbound_atu(ep
, func_no
, bar
,
243 epf_bar
->phys_addr
, as_type
);
247 dw_pcie_dbi_ro_wr_en(pci
);
249 dw_pcie_writel_dbi2(pci
, reg
, lower_32_bits(size
- 1));
250 dw_pcie_writel_dbi(pci
, reg
, flags
);
252 if (flags
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
253 dw_pcie_writel_dbi2(pci
, reg
+ 4, upper_32_bits(size
- 1));
254 dw_pcie_writel_dbi(pci
, reg
+ 4, 0);
257 ep
->epf_bar
[bar
] = epf_bar
;
258 dw_pcie_dbi_ro_wr_dis(pci
);
263 static int dw_pcie_find_index(struct dw_pcie_ep
*ep
, phys_addr_t addr
,
267 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
269 for (index
= 0; index
< pci
->num_ob_windows
; index
++) {
270 if (ep
->outbound_addr
[index
] != addr
)
279 static void dw_pcie_ep_unmap_addr(struct pci_epc
*epc
, u8 func_no
,
284 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
285 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
287 ret
= dw_pcie_find_index(ep
, addr
, &atu_index
);
291 dw_pcie_disable_atu(pci
, atu_index
, DW_PCIE_REGION_OUTBOUND
);
292 clear_bit(atu_index
, ep
->ob_window_map
);
295 static int dw_pcie_ep_map_addr(struct pci_epc
*epc
, u8 func_no
,
297 u64 pci_addr
, size_t size
)
300 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
301 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
303 ret
= dw_pcie_ep_outbound_atu(ep
, func_no
, addr
, pci_addr
, size
);
305 dev_err(pci
->dev
, "Failed to enable address\n");
312 static int dw_pcie_ep_get_msi(struct pci_epc
*epc
, u8 func_no
)
314 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
315 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
317 unsigned int func_offset
= 0;
318 struct dw_pcie_ep_func
*ep_func
;
320 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
321 if (!ep_func
|| !ep_func
->msi_cap
)
324 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
326 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_FLAGS
;
327 val
= dw_pcie_readw_dbi(pci
, reg
);
328 if (!(val
& PCI_MSI_FLAGS_ENABLE
))
331 val
= (val
& PCI_MSI_FLAGS_QSIZE
) >> 4;
336 static int dw_pcie_ep_set_msi(struct pci_epc
*epc
, u8 func_no
, u8 interrupts
)
338 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
339 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
341 unsigned int func_offset
= 0;
342 struct dw_pcie_ep_func
*ep_func
;
344 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
345 if (!ep_func
|| !ep_func
->msi_cap
)
348 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
350 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_FLAGS
;
351 val
= dw_pcie_readw_dbi(pci
, reg
);
352 val
&= ~PCI_MSI_FLAGS_QMASK
;
353 val
|= (interrupts
<< 1) & PCI_MSI_FLAGS_QMASK
;
354 dw_pcie_dbi_ro_wr_en(pci
);
355 dw_pcie_writew_dbi(pci
, reg
, val
);
356 dw_pcie_dbi_ro_wr_dis(pci
);
361 static int dw_pcie_ep_get_msix(struct pci_epc
*epc
, u8 func_no
)
363 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
364 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
366 unsigned int func_offset
= 0;
367 struct dw_pcie_ep_func
*ep_func
;
369 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
370 if (!ep_func
|| !ep_func
->msix_cap
)
373 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
375 reg
= ep_func
->msix_cap
+ func_offset
+ PCI_MSIX_FLAGS
;
376 val
= dw_pcie_readw_dbi(pci
, reg
);
377 if (!(val
& PCI_MSIX_FLAGS_ENABLE
))
380 val
&= PCI_MSIX_FLAGS_QSIZE
;
385 static int dw_pcie_ep_set_msix(struct pci_epc
*epc
, u8 func_no
, u16 interrupts
,
386 enum pci_barno bir
, u32 offset
)
388 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
389 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
391 unsigned int func_offset
= 0;
392 struct dw_pcie_ep_func
*ep_func
;
394 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
395 if (!ep_func
|| !ep_func
->msix_cap
)
398 dw_pcie_dbi_ro_wr_en(pci
);
400 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
402 reg
= ep_func
->msix_cap
+ func_offset
+ PCI_MSIX_FLAGS
;
403 val
= dw_pcie_readw_dbi(pci
, reg
);
404 val
&= ~PCI_MSIX_FLAGS_QSIZE
;
406 dw_pcie_writew_dbi(pci
, reg
, val
);
408 reg
= ep_func
->msix_cap
+ func_offset
+ PCI_MSIX_TABLE
;
410 dw_pcie_writel_dbi(pci
, reg
, val
);
412 reg
= ep_func
->msix_cap
+ func_offset
+ PCI_MSIX_PBA
;
413 val
= (offset
+ (interrupts
* PCI_MSIX_ENTRY_SIZE
)) | bir
;
414 dw_pcie_writel_dbi(pci
, reg
, val
);
416 dw_pcie_dbi_ro_wr_dis(pci
);
421 static int dw_pcie_ep_raise_irq(struct pci_epc
*epc
, u8 func_no
,
422 enum pci_epc_irq_type type
, u16 interrupt_num
)
424 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
426 if (!ep
->ops
->raise_irq
)
429 return ep
->ops
->raise_irq(ep
, func_no
, type
, interrupt_num
);
432 static void dw_pcie_ep_stop(struct pci_epc
*epc
)
434 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
435 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
437 if (!pci
->ops
->stop_link
)
440 pci
->ops
->stop_link(pci
);
443 static int dw_pcie_ep_start(struct pci_epc
*epc
)
445 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
446 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
448 if (!pci
->ops
->start_link
)
451 return pci
->ops
->start_link(pci
);
454 static const struct pci_epc_features
*
455 dw_pcie_ep_get_features(struct pci_epc
*epc
, u8 func_no
)
457 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
459 if (!ep
->ops
->get_features
)
462 return ep
->ops
->get_features(ep
);
465 static const struct pci_epc_ops epc_ops
= {
466 .write_header
= dw_pcie_ep_write_header
,
467 .set_bar
= dw_pcie_ep_set_bar
,
468 .clear_bar
= dw_pcie_ep_clear_bar
,
469 .map_addr
= dw_pcie_ep_map_addr
,
470 .unmap_addr
= dw_pcie_ep_unmap_addr
,
471 .set_msi
= dw_pcie_ep_set_msi
,
472 .get_msi
= dw_pcie_ep_get_msi
,
473 .set_msix
= dw_pcie_ep_set_msix
,
474 .get_msix
= dw_pcie_ep_get_msix
,
475 .raise_irq
= dw_pcie_ep_raise_irq
,
476 .start
= dw_pcie_ep_start
,
477 .stop
= dw_pcie_ep_stop
,
478 .get_features
= dw_pcie_ep_get_features
,
481 int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep
*ep
, u8 func_no
)
483 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
484 struct device
*dev
= pci
->dev
;
486 dev_err(dev
, "EP cannot trigger legacy IRQs\n");
491 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep
*ep
, u8 func_no
,
494 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
495 struct dw_pcie_ep_func
*ep_func
;
496 struct pci_epc
*epc
= ep
->epc
;
497 unsigned int aligned_offset
;
498 unsigned int func_offset
= 0;
499 u16 msg_ctrl
, msg_data
;
500 u32 msg_addr_lower
, msg_addr_upper
, reg
;
505 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
506 if (!ep_func
|| !ep_func
->msi_cap
)
509 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
511 /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
512 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_FLAGS
;
513 msg_ctrl
= dw_pcie_readw_dbi(pci
, reg
);
514 has_upper
= !!(msg_ctrl
& PCI_MSI_FLAGS_64BIT
);
515 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_ADDRESS_LO
;
516 msg_addr_lower
= dw_pcie_readl_dbi(pci
, reg
);
518 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_ADDRESS_HI
;
519 msg_addr_upper
= dw_pcie_readl_dbi(pci
, reg
);
520 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_DATA_64
;
521 msg_data
= dw_pcie_readw_dbi(pci
, reg
);
524 reg
= ep_func
->msi_cap
+ func_offset
+ PCI_MSI_DATA_32
;
525 msg_data
= dw_pcie_readw_dbi(pci
, reg
);
527 aligned_offset
= msg_addr_lower
& (epc
->mem
->window
.page_size
- 1);
528 msg_addr
= ((u64
)msg_addr_upper
) << 32 |
529 (msg_addr_lower
& ~aligned_offset
);
530 ret
= dw_pcie_ep_map_addr(epc
, func_no
, ep
->msi_mem_phys
, msg_addr
,
531 epc
->mem
->window
.page_size
);
535 writel(msg_data
| (interrupt_num
- 1), ep
->msi_mem
+ aligned_offset
);
537 dw_pcie_ep_unmap_addr(epc
, func_no
, ep
->msi_mem_phys
);
542 int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep
*ep
, u8 func_no
,
545 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
546 struct dw_pcie_ep_func
*ep_func
;
549 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
550 if (!ep_func
|| !ep_func
->msix_cap
)
553 msg_data
= (func_no
<< PCIE_MSIX_DOORBELL_PF_SHIFT
) |
556 dw_pcie_writel_dbi(pci
, PCIE_MSIX_DOORBELL
, msg_data
);
561 int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep
*ep
, u8 func_no
,
564 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
565 struct dw_pcie_ep_func
*ep_func
;
566 struct pci_epf_msix_tbl
*msix_tbl
;
567 struct pci_epc
*epc
= ep
->epc
;
568 unsigned int func_offset
= 0;
569 u32 reg
, msg_data
, vec_ctrl
;
570 unsigned int aligned_offset
;
576 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
577 if (!ep_func
|| !ep_func
->msix_cap
)
580 func_offset
= dw_pcie_ep_func_select(ep
, func_no
);
582 reg
= ep_func
->msix_cap
+ func_offset
+ PCI_MSIX_TABLE
;
583 tbl_offset
= dw_pcie_readl_dbi(pci
, reg
);
584 bir
= (tbl_offset
& PCI_MSIX_TABLE_BIR
);
585 tbl_offset
&= PCI_MSIX_TABLE_OFFSET
;
587 msix_tbl
= ep
->epf_bar
[bir
]->addr
+ tbl_offset
;
588 msg_addr
= msix_tbl
[(interrupt_num
- 1)].msg_addr
;
589 msg_data
= msix_tbl
[(interrupt_num
- 1)].msg_data
;
590 vec_ctrl
= msix_tbl
[(interrupt_num
- 1)].vector_ctrl
;
592 if (vec_ctrl
& PCI_MSIX_ENTRY_CTRL_MASKBIT
) {
593 dev_dbg(pci
->dev
, "MSI-X entry ctrl set\n");
597 aligned_offset
= msg_addr
& (epc
->mem
->window
.page_size
- 1);
598 ret
= dw_pcie_ep_map_addr(epc
, func_no
, ep
->msi_mem_phys
, msg_addr
,
599 epc
->mem
->window
.page_size
);
603 writel(msg_data
, ep
->msi_mem
+ aligned_offset
);
605 dw_pcie_ep_unmap_addr(epc
, func_no
, ep
->msi_mem_phys
);
610 void dw_pcie_ep_exit(struct dw_pcie_ep
*ep
)
612 struct pci_epc
*epc
= ep
->epc
;
614 pci_epc_mem_free_addr(epc
, ep
->msi_mem_phys
, ep
->msi_mem
,
615 epc
->mem
->window
.page_size
);
617 pci_epc_mem_exit(epc
);
620 static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie
*pci
, int cap
)
623 int pos
= PCI_CFG_SPACE_SIZE
;
626 header
= dw_pcie_readl_dbi(pci
, pos
);
627 if (PCI_EXT_CAP_ID(header
) == cap
)
630 pos
= PCI_EXT_CAP_NEXT(header
);
638 int dw_pcie_ep_init_complete(struct dw_pcie_ep
*ep
)
640 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
647 hdr_type
= dw_pcie_readb_dbi(pci
, PCI_HEADER_TYPE
) &
648 PCI_HEADER_TYPE_MASK
;
649 if (hdr_type
!= PCI_HEADER_TYPE_NORMAL
) {
651 "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
656 offset
= dw_pcie_ep_find_ext_capability(pci
, PCI_EXT_CAP_ID_REBAR
);
658 dw_pcie_dbi_ro_wr_en(pci
);
661 reg
= dw_pcie_readl_dbi(pci
, offset
+ PCI_REBAR_CTRL
);
662 nbars
= (reg
& PCI_REBAR_CTRL_NBAR_MASK
) >>
663 PCI_REBAR_CTRL_NBAR_SHIFT
;
665 for (i
= 0; i
< nbars
; i
++, offset
+= PCI_REBAR_CTRL
)
666 dw_pcie_writel_dbi(pci
, offset
+ PCI_REBAR_CAP
, 0x0);
670 dw_pcie_dbi_ro_wr_dis(pci
);
674 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete
);
676 int dw_pcie_ep_init(struct dw_pcie_ep
*ep
)
681 struct resource
*res
;
683 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
684 struct device
*dev
= pci
->dev
;
685 struct platform_device
*pdev
= to_platform_device(dev
);
686 struct device_node
*np
= dev
->of_node
;
687 const struct pci_epc_features
*epc_features
;
688 struct dw_pcie_ep_func
*ep_func
;
690 INIT_LIST_HEAD(&ep
->func_list
);
692 if (!pci
->dbi_base
) {
693 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "dbi");
694 pci
->dbi_base
= devm_pci_remap_cfg_resource(dev
, res
);
695 if (IS_ERR(pci
->dbi_base
))
696 return PTR_ERR(pci
->dbi_base
);
699 if (!pci
->dbi_base2
) {
700 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "dbi2");
702 pci
->dbi_base2
= pci
->dbi_base
+ SZ_4K
;
704 pci
->dbi_base2
= devm_pci_remap_cfg_resource(dev
, res
);
705 if (IS_ERR(pci
->dbi_base2
))
706 return PTR_ERR(pci
->dbi_base2
);
710 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "addr_space");
714 ep
->phys_base
= res
->start
;
715 ep
->addr_size
= resource_size(res
);
717 ep
->ib_window_map
= devm_kcalloc(dev
,
718 BITS_TO_LONGS(pci
->num_ib_windows
),
721 if (!ep
->ib_window_map
)
724 ep
->ob_window_map
= devm_kcalloc(dev
,
725 BITS_TO_LONGS(pci
->num_ob_windows
),
728 if (!ep
->ob_window_map
)
731 addr
= devm_kcalloc(dev
, pci
->num_ob_windows
, sizeof(phys_addr_t
),
735 ep
->outbound_addr
= addr
;
737 if (pci
->link_gen
< 1)
738 pci
->link_gen
= of_pci_get_max_link_speed(np
);
740 epc
= devm_pci_epc_create(dev
, &epc_ops
);
742 dev_err(dev
, "Failed to create epc device\n");
747 epc_set_drvdata(epc
, ep
);
749 ret
= of_property_read_u8(np
, "max-functions", &epc
->max_functions
);
751 epc
->max_functions
= 1;
753 for (func_no
= 0; func_no
< epc
->max_functions
; func_no
++) {
754 ep_func
= devm_kzalloc(dev
, sizeof(*ep_func
), GFP_KERNEL
);
758 ep_func
->func_no
= func_no
;
759 ep_func
->msi_cap
= dw_pcie_ep_find_capability(ep
, func_no
,
761 ep_func
->msix_cap
= dw_pcie_ep_find_capability(ep
, func_no
,
764 list_add_tail(&ep_func
->list
, &ep
->func_list
);
767 if (ep
->ops
->ep_init
)
768 ep
->ops
->ep_init(ep
);
770 ret
= pci_epc_mem_init(epc
, ep
->phys_base
, ep
->addr_size
,
773 dev_err(dev
, "Failed to initialize address space\n");
777 ep
->msi_mem
= pci_epc_mem_alloc_addr(epc
, &ep
->msi_mem_phys
,
778 epc
->mem
->window
.page_size
);
780 dev_err(dev
, "Failed to reserve memory for MSI/MSI-X\n");
784 if (ep
->ops
->get_features
) {
785 epc_features
= ep
->ops
->get_features(ep
);
786 if (epc_features
->core_init_notifier
)
790 return dw_pcie_ep_init_complete(ep
);
792 EXPORT_SYMBOL_GPL(dw_pcie_ep_init
);