// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe Endpoint controller driver
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
19 * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
20 * the endpoint function
22 * @func_no: Function number of the endpoint device
24 * Return: struct dw_pcie_ep_func if success, NULL otherwise.
26 struct dw_pcie_ep_func
*
27 dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep
*ep
, u8 func_no
)
29 struct dw_pcie_ep_func
*ep_func
;
31 list_for_each_entry(ep_func
, &ep
->func_list
, list
) {
32 if (ep_func
->func_no
== func_no
)
39 static void __dw_pcie_ep_reset_bar(struct dw_pcie
*pci
, u8 func_no
,
40 enum pci_barno bar
, int flags
)
42 struct dw_pcie_ep
*ep
= &pci
->ep
;
45 reg
= PCI_BASE_ADDRESS_0
+ (4 * bar
);
46 dw_pcie_dbi_ro_wr_en(pci
);
47 dw_pcie_ep_writel_dbi2(ep
, func_no
, reg
, 0x0);
48 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
, 0x0);
49 if (flags
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
50 dw_pcie_ep_writel_dbi2(ep
, func_no
, reg
+ 4, 0x0);
51 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
+ 4, 0x0);
53 dw_pcie_dbi_ro_wr_dis(pci
);
57 * dw_pcie_ep_reset_bar - Reset endpoint BAR
58 * @pci: DWC PCI device
59 * @bar: BAR number of the endpoint
61 void dw_pcie_ep_reset_bar(struct dw_pcie
*pci
, enum pci_barno bar
)
65 funcs
= pci
->ep
.epc
->max_functions
;
67 for (func_no
= 0; func_no
< funcs
; func_no
++)
68 __dw_pcie_ep_reset_bar(pci
, func_no
, bar
, 0);
70 EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar
);
72 static u8
__dw_pcie_ep_find_next_cap(struct dw_pcie_ep
*ep
, u8 func_no
,
75 u8 cap_id
, next_cap_ptr
;
81 reg
= dw_pcie_ep_readw_dbi(ep
, func_no
, cap_ptr
);
82 cap_id
= (reg
& 0x00ff);
84 if (cap_id
> PCI_CAP_ID_MAX
)
90 next_cap_ptr
= (reg
& 0xff00) >> 8;
91 return __dw_pcie_ep_find_next_cap(ep
, func_no
, next_cap_ptr
, cap
);
94 static u8
dw_pcie_ep_find_capability(struct dw_pcie_ep
*ep
, u8 func_no
, u8 cap
)
99 reg
= dw_pcie_ep_readw_dbi(ep
, func_no
, PCI_CAPABILITY_LIST
);
100 next_cap_ptr
= (reg
& 0x00ff);
102 return __dw_pcie_ep_find_next_cap(ep
, func_no
, next_cap_ptr
, cap
);
105 static int dw_pcie_ep_write_header(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
106 struct pci_epf_header
*hdr
)
108 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
109 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
111 dw_pcie_dbi_ro_wr_en(pci
);
112 dw_pcie_ep_writew_dbi(ep
, func_no
, PCI_VENDOR_ID
, hdr
->vendorid
);
113 dw_pcie_ep_writew_dbi(ep
, func_no
, PCI_DEVICE_ID
, hdr
->deviceid
);
114 dw_pcie_ep_writeb_dbi(ep
, func_no
, PCI_REVISION_ID
, hdr
->revid
);
115 dw_pcie_ep_writeb_dbi(ep
, func_no
, PCI_CLASS_PROG
, hdr
->progif_code
);
116 dw_pcie_ep_writew_dbi(ep
, func_no
, PCI_CLASS_DEVICE
,
117 hdr
->subclass_code
| hdr
->baseclass_code
<< 8);
118 dw_pcie_ep_writeb_dbi(ep
, func_no
, PCI_CACHE_LINE_SIZE
,
119 hdr
->cache_line_size
);
120 dw_pcie_ep_writew_dbi(ep
, func_no
, PCI_SUBSYSTEM_VENDOR_ID
,
121 hdr
->subsys_vendor_id
);
122 dw_pcie_ep_writew_dbi(ep
, func_no
, PCI_SUBSYSTEM_ID
, hdr
->subsys_id
);
123 dw_pcie_ep_writeb_dbi(ep
, func_no
, PCI_INTERRUPT_PIN
,
125 dw_pcie_dbi_ro_wr_dis(pci
);
130 static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep
*ep
, u8 func_no
, int type
,
131 dma_addr_t cpu_addr
, enum pci_barno bar
)
135 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
137 if (!ep
->bar_to_atu
[bar
])
138 free_win
= find_first_zero_bit(ep
->ib_window_map
, pci
->num_ib_windows
);
140 free_win
= ep
->bar_to_atu
[bar
] - 1;
142 if (free_win
>= pci
->num_ib_windows
) {
143 dev_err(pci
->dev
, "No free inbound window\n");
147 ret
= dw_pcie_prog_ep_inbound_atu(pci
, func_no
, free_win
, type
,
150 dev_err(pci
->dev
, "Failed to program IB window\n");
155 * Always increment free_win before assignment, since value 0 is used to identify
156 * unallocated mapping.
158 ep
->bar_to_atu
[bar
] = free_win
+ 1;
159 set_bit(free_win
, ep
->ib_window_map
);
164 static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep
*ep
,
165 struct dw_pcie_ob_atu_cfg
*atu
)
167 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
171 free_win
= find_first_zero_bit(ep
->ob_window_map
, pci
->num_ob_windows
);
172 if (free_win
>= pci
->num_ob_windows
) {
173 dev_err(pci
->dev
, "No free outbound window\n");
177 atu
->index
= free_win
;
178 ret
= dw_pcie_prog_outbound_atu(pci
, atu
);
182 set_bit(free_win
, ep
->ob_window_map
);
183 ep
->outbound_addr
[free_win
] = atu
->cpu_addr
;
188 static void dw_pcie_ep_clear_bar(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
189 struct pci_epf_bar
*epf_bar
)
191 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
192 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
193 enum pci_barno bar
= epf_bar
->barno
;
194 u32 atu_index
= ep
->bar_to_atu
[bar
] - 1;
196 if (!ep
->bar_to_atu
[bar
])
199 __dw_pcie_ep_reset_bar(pci
, func_no
, bar
, epf_bar
->flags
);
201 dw_pcie_disable_atu(pci
, PCIE_ATU_REGION_DIR_IB
, atu_index
);
202 clear_bit(atu_index
, ep
->ib_window_map
);
203 ep
->epf_bar
[bar
] = NULL
;
204 ep
->bar_to_atu
[bar
] = 0;
207 static int dw_pcie_ep_set_bar(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
208 struct pci_epf_bar
*epf_bar
)
210 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
211 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
212 enum pci_barno bar
= epf_bar
->barno
;
213 size_t size
= epf_bar
->size
;
214 int flags
= epf_bar
->flags
;
219 * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
220 * 1 and 2 to form a 64-bit BAR.
222 if ((flags
& PCI_BASE_ADDRESS_MEM_TYPE_64
) && (bar
& 1))
225 reg
= PCI_BASE_ADDRESS_0
+ (4 * bar
);
227 if (!(flags
& PCI_BASE_ADDRESS_SPACE
))
228 type
= PCIE_ATU_TYPE_MEM
;
230 type
= PCIE_ATU_TYPE_IO
;
232 ret
= dw_pcie_ep_inbound_atu(ep
, func_no
, type
, epf_bar
->phys_addr
, bar
);
236 if (ep
->epf_bar
[bar
])
239 dw_pcie_dbi_ro_wr_en(pci
);
241 dw_pcie_ep_writel_dbi2(ep
, func_no
, reg
, lower_32_bits(size
- 1));
242 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
, flags
);
244 if (flags
& PCI_BASE_ADDRESS_MEM_TYPE_64
) {
245 dw_pcie_ep_writel_dbi2(ep
, func_no
, reg
+ 4, upper_32_bits(size
- 1));
246 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
+ 4, 0);
249 ep
->epf_bar
[bar
] = epf_bar
;
250 dw_pcie_dbi_ro_wr_dis(pci
);
255 static int dw_pcie_find_index(struct dw_pcie_ep
*ep
, phys_addr_t addr
,
259 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
261 for (index
= 0; index
< pci
->num_ob_windows
; index
++) {
262 if (ep
->outbound_addr
[index
] != addr
)
271 static u64
dw_pcie_ep_align_addr(struct pci_epc
*epc
, u64 pci_addr
,
272 size_t *pci_size
, size_t *offset
)
274 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
275 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
276 u64 mask
= pci
->region_align
- 1;
277 size_t ofst
= pci_addr
& mask
;
279 *pci_size
= ALIGN(ofst
+ *pci_size
, epc
->mem
->window
.page_size
);
282 return pci_addr
& ~mask
;
285 static void dw_pcie_ep_unmap_addr(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
290 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
291 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
293 ret
= dw_pcie_find_index(ep
, addr
, &atu_index
);
297 ep
->outbound_addr
[atu_index
] = 0;
298 dw_pcie_disable_atu(pci
, PCIE_ATU_REGION_DIR_OB
, atu_index
);
299 clear_bit(atu_index
, ep
->ob_window_map
);
302 static int dw_pcie_ep_map_addr(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
303 phys_addr_t addr
, u64 pci_addr
, size_t size
)
306 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
307 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
308 struct dw_pcie_ob_atu_cfg atu
= { 0 };
310 atu
.func_no
= func_no
;
311 atu
.type
= PCIE_ATU_TYPE_MEM
;
313 atu
.pci_addr
= pci_addr
;
315 ret
= dw_pcie_ep_outbound_atu(ep
, &atu
);
317 dev_err(pci
->dev
, "Failed to enable address\n");
324 static int dw_pcie_ep_get_msi(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
)
326 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
327 struct dw_pcie_ep_func
*ep_func
;
330 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
331 if (!ep_func
|| !ep_func
->msi_cap
)
334 reg
= ep_func
->msi_cap
+ PCI_MSI_FLAGS
;
335 val
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
336 if (!(val
& PCI_MSI_FLAGS_ENABLE
))
339 val
= FIELD_GET(PCI_MSI_FLAGS_QSIZE
, val
);
344 static int dw_pcie_ep_set_msi(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
347 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
348 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
349 struct dw_pcie_ep_func
*ep_func
;
352 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
353 if (!ep_func
|| !ep_func
->msi_cap
)
356 reg
= ep_func
->msi_cap
+ PCI_MSI_FLAGS
;
357 val
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
358 val
&= ~PCI_MSI_FLAGS_QMASK
;
359 val
|= FIELD_PREP(PCI_MSI_FLAGS_QMASK
, interrupts
);
360 dw_pcie_dbi_ro_wr_en(pci
);
361 dw_pcie_ep_writew_dbi(ep
, func_no
, reg
, val
);
362 dw_pcie_dbi_ro_wr_dis(pci
);
367 static int dw_pcie_ep_get_msix(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
)
369 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
370 struct dw_pcie_ep_func
*ep_func
;
373 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
374 if (!ep_func
|| !ep_func
->msix_cap
)
377 reg
= ep_func
->msix_cap
+ PCI_MSIX_FLAGS
;
378 val
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
379 if (!(val
& PCI_MSIX_FLAGS_ENABLE
))
382 val
&= PCI_MSIX_FLAGS_QSIZE
;
387 static int dw_pcie_ep_set_msix(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
388 u16 interrupts
, enum pci_barno bir
, u32 offset
)
390 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
391 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
392 struct dw_pcie_ep_func
*ep_func
;
395 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
396 if (!ep_func
|| !ep_func
->msix_cap
)
399 dw_pcie_dbi_ro_wr_en(pci
);
401 reg
= ep_func
->msix_cap
+ PCI_MSIX_FLAGS
;
402 val
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
403 val
&= ~PCI_MSIX_FLAGS_QSIZE
;
405 dw_pcie_writew_dbi(pci
, reg
, val
);
407 reg
= ep_func
->msix_cap
+ PCI_MSIX_TABLE
;
409 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
, val
);
411 reg
= ep_func
->msix_cap
+ PCI_MSIX_PBA
;
412 val
= (offset
+ (interrupts
* PCI_MSIX_ENTRY_SIZE
)) | bir
;
413 dw_pcie_ep_writel_dbi(ep
, func_no
, reg
, val
);
415 dw_pcie_dbi_ro_wr_dis(pci
);
420 static int dw_pcie_ep_raise_irq(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
,
421 unsigned int type
, u16 interrupt_num
)
423 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
425 if (!ep
->ops
->raise_irq
)
428 return ep
->ops
->raise_irq(ep
, func_no
, type
, interrupt_num
);
/* pci_epc_ops::stop - stop the PCIe link. */
static void dw_pcie_ep_stop(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_stop_link(pci);
}
/* pci_epc_ops::start - start the PCIe link. */
static int dw_pcie_ep_start(struct pci_epc *epc)
{
	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_start_link(pci);
}
447 static const struct pci_epc_features
*
448 dw_pcie_ep_get_features(struct pci_epc
*epc
, u8 func_no
, u8 vfunc_no
)
450 struct dw_pcie_ep
*ep
= epc_get_drvdata(epc
);
452 if (!ep
->ops
->get_features
)
455 return ep
->ops
->get_features(ep
);
458 static const struct pci_epc_ops epc_ops
= {
459 .write_header
= dw_pcie_ep_write_header
,
460 .set_bar
= dw_pcie_ep_set_bar
,
461 .clear_bar
= dw_pcie_ep_clear_bar
,
462 .align_addr
= dw_pcie_ep_align_addr
,
463 .map_addr
= dw_pcie_ep_map_addr
,
464 .unmap_addr
= dw_pcie_ep_unmap_addr
,
465 .set_msi
= dw_pcie_ep_set_msi
,
466 .get_msi
= dw_pcie_ep_get_msi
,
467 .set_msix
= dw_pcie_ep_set_msix
,
468 .get_msix
= dw_pcie_ep_get_msix
,
469 .raise_irq
= dw_pcie_ep_raise_irq
,
470 .start
= dw_pcie_ep_start
,
471 .stop
= dw_pcie_ep_stop
,
472 .get_features
= dw_pcie_ep_get_features
,
476 * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
478 * @func_no: Function number of the endpoint
480 * Return: 0 if success, errono otherwise.
482 int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep
*ep
, u8 func_no
)
484 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
485 struct device
*dev
= pci
->dev
;
487 dev_err(dev
, "EP cannot raise INTX IRQs\n");
491 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq
);
494 * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
496 * @func_no: Function number of the endpoint
497 * @interrupt_num: Interrupt number to be raised
499 * Return: 0 if success, errono otherwise.
501 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep
*ep
, u8 func_no
,
504 u32 msg_addr_lower
, msg_addr_upper
, reg
;
505 struct dw_pcie_ep_func
*ep_func
;
506 struct pci_epc
*epc
= ep
->epc
;
507 size_t map_size
= sizeof(u32
);
509 u16 msg_ctrl
, msg_data
;
514 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
515 if (!ep_func
|| !ep_func
->msi_cap
)
518 /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
519 reg
= ep_func
->msi_cap
+ PCI_MSI_FLAGS
;
520 msg_ctrl
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
521 has_upper
= !!(msg_ctrl
& PCI_MSI_FLAGS_64BIT
);
522 reg
= ep_func
->msi_cap
+ PCI_MSI_ADDRESS_LO
;
523 msg_addr_lower
= dw_pcie_ep_readl_dbi(ep
, func_no
, reg
);
525 reg
= ep_func
->msi_cap
+ PCI_MSI_ADDRESS_HI
;
526 msg_addr_upper
= dw_pcie_ep_readl_dbi(ep
, func_no
, reg
);
527 reg
= ep_func
->msi_cap
+ PCI_MSI_DATA_64
;
528 msg_data
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
531 reg
= ep_func
->msi_cap
+ PCI_MSI_DATA_32
;
532 msg_data
= dw_pcie_ep_readw_dbi(ep
, func_no
, reg
);
534 msg_addr
= ((u64
)msg_addr_upper
) << 32 | msg_addr_lower
;
536 msg_addr
= dw_pcie_ep_align_addr(epc
, msg_addr
, &map_size
, &offset
);
537 ret
= dw_pcie_ep_map_addr(epc
, func_no
, 0, ep
->msi_mem_phys
, msg_addr
,
542 writel(msg_data
| (interrupt_num
- 1), ep
->msi_mem
+ offset
);
544 dw_pcie_ep_unmap_addr(epc
, func_no
, 0, ep
->msi_mem_phys
);
548 EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq
);
551 * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
554 * @func_no: Function number of the endpoint device
555 * @interrupt_num: Interrupt number to be raised
557 * Return: 0 if success, errno otherwise.
559 int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep
*ep
, u8 func_no
,
562 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
563 struct dw_pcie_ep_func
*ep_func
;
566 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
567 if (!ep_func
|| !ep_func
->msix_cap
)
570 msg_data
= (func_no
<< PCIE_MSIX_DOORBELL_PF_SHIFT
) |
573 dw_pcie_writel_dbi(pci
, PCIE_MSIX_DOORBELL
, msg_data
);
579 * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
581 * @func_no: Function number of the endpoint device
582 * @interrupt_num: Interrupt number to be raised
584 * Return: 0 if success, errno otherwise.
586 int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep
*ep
, u8 func_no
,
589 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
590 struct pci_epf_msix_tbl
*msix_tbl
;
591 struct dw_pcie_ep_func
*ep_func
;
592 struct pci_epc
*epc
= ep
->epc
;
593 size_t map_size
= sizeof(u32
);
595 u32 reg
, msg_data
, vec_ctrl
;
601 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
602 if (!ep_func
|| !ep_func
->msix_cap
)
605 reg
= ep_func
->msix_cap
+ PCI_MSIX_TABLE
;
606 tbl_offset
= dw_pcie_ep_readl_dbi(ep
, func_no
, reg
);
607 bir
= FIELD_GET(PCI_MSIX_TABLE_BIR
, tbl_offset
);
608 tbl_offset
&= PCI_MSIX_TABLE_OFFSET
;
610 msix_tbl
= ep
->epf_bar
[bir
]->addr
+ tbl_offset
;
611 msg_addr
= msix_tbl
[(interrupt_num
- 1)].msg_addr
;
612 msg_data
= msix_tbl
[(interrupt_num
- 1)].msg_data
;
613 vec_ctrl
= msix_tbl
[(interrupt_num
- 1)].vector_ctrl
;
615 if (vec_ctrl
& PCI_MSIX_ENTRY_CTRL_MASKBIT
) {
616 dev_dbg(pci
->dev
, "MSI-X entry ctrl set\n");
620 msg_addr
= dw_pcie_ep_align_addr(epc
, msg_addr
, &map_size
, &offset
);
621 ret
= dw_pcie_ep_map_addr(epc
, func_no
, 0, ep
->msi_mem_phys
, msg_addr
,
626 writel(msg_data
, ep
->msi_mem
+ offset
);
628 dw_pcie_ep_unmap_addr(epc
, func_no
, 0, ep
->msi_mem_phys
);
/**
 * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
 * @ep: DWC EP device
 *
 * Cleans up the DWC EP specific resources like eDMA etc... after fundamental
 * reset like PERST#. Note that this API is only applicable for drivers
 * supporting PERST# or any other methods of fundamental reset.
 */
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_edma_remove(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
650 * dw_pcie_ep_deinit - Deinitialize the endpoint device
653 * Deinitialize the endpoint device. EPC device is not destroyed since that will
654 * be taken care by Devres.
656 void dw_pcie_ep_deinit(struct dw_pcie_ep
*ep
)
658 struct pci_epc
*epc
= ep
->epc
;
660 dw_pcie_ep_cleanup(ep
);
662 pci_epc_mem_free_addr(epc
, ep
->msi_mem_phys
, ep
->msi_mem
,
663 epc
->mem
->window
.page_size
);
665 pci_epc_mem_exit(epc
);
667 EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit
);
669 static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie
*pci
, int cap
)
672 int pos
= PCI_CFG_SPACE_SIZE
;
675 header
= dw_pcie_readl_dbi(pci
, pos
);
676 if (PCI_EXT_CAP_ID(header
) == cap
)
679 pos
= PCI_EXT_CAP_NEXT(header
);
687 static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie
*pci
)
693 offset
= dw_pcie_ep_find_ext_capability(pci
, PCI_EXT_CAP_ID_REBAR
);
695 dw_pcie_dbi_ro_wr_en(pci
);
698 reg
= dw_pcie_readl_dbi(pci
, offset
+ PCI_REBAR_CTRL
);
699 nbars
= (reg
& PCI_REBAR_CTRL_NBAR_MASK
) >>
700 PCI_REBAR_CTRL_NBAR_SHIFT
;
703 * PCIe r6.0, sec 7.8.6.2 require us to support at least one
704 * size in the range from 1 MB to 512 GB. Advertise support
705 * for 1 MB BAR size only.
707 for (i
= 0; i
< nbars
; i
++, offset
+= PCI_REBAR_CTRL
)
708 dw_pcie_writel_dbi(pci
, offset
+ PCI_REBAR_CAP
, BIT(4));
712 dw_pcie_dbi_ro_wr_dis(pci
);
716 * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
719 * Initialize the registers (CSRs) specific to DWC EP. This API should be called
720 * only when the endpoint receives an active refclk (either from host or
721 * generated locally).
723 int dw_pcie_ep_init_registers(struct dw_pcie_ep
*ep
)
725 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
726 struct dw_pcie_ep_func
*ep_func
;
727 struct device
*dev
= pci
->dev
;
728 struct pci_epc
*epc
= ep
->epc
;
729 u32 ptm_cap_base
, reg
;
735 hdr_type
= dw_pcie_readb_dbi(pci
, PCI_HEADER_TYPE
) &
736 PCI_HEADER_TYPE_MASK
;
737 if (hdr_type
!= PCI_HEADER_TYPE_NORMAL
) {
739 "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
744 dw_pcie_version_detect(pci
);
746 dw_pcie_iatu_detect(pci
);
748 ret
= dw_pcie_edma_detect(pci
);
752 if (!ep
->ib_window_map
) {
753 ep
->ib_window_map
= devm_bitmap_zalloc(dev
, pci
->num_ib_windows
,
755 if (!ep
->ib_window_map
)
756 goto err_remove_edma
;
759 if (!ep
->ob_window_map
) {
760 ep
->ob_window_map
= devm_bitmap_zalloc(dev
, pci
->num_ob_windows
,
762 if (!ep
->ob_window_map
)
763 goto err_remove_edma
;
766 if (!ep
->outbound_addr
) {
767 addr
= devm_kcalloc(dev
, pci
->num_ob_windows
, sizeof(phys_addr_t
),
770 goto err_remove_edma
;
771 ep
->outbound_addr
= addr
;
774 for (func_no
= 0; func_no
< epc
->max_functions
; func_no
++) {
776 ep_func
= dw_pcie_ep_get_func_from_ep(ep
, func_no
);
780 ep_func
= devm_kzalloc(dev
, sizeof(*ep_func
), GFP_KERNEL
);
782 goto err_remove_edma
;
784 ep_func
->func_no
= func_no
;
785 ep_func
->msi_cap
= dw_pcie_ep_find_capability(ep
, func_no
,
787 ep_func
->msix_cap
= dw_pcie_ep_find_capability(ep
, func_no
,
790 list_add_tail(&ep_func
->list
, &ep
->func_list
);
796 ptm_cap_base
= dw_pcie_ep_find_ext_capability(pci
, PCI_EXT_CAP_ID_PTM
);
799 * PTM responder capability can be disabled only after disabling
800 * PTM root capability.
803 dw_pcie_dbi_ro_wr_en(pci
);
804 reg
= dw_pcie_readl_dbi(pci
, ptm_cap_base
+ PCI_PTM_CAP
);
805 reg
&= ~PCI_PTM_CAP_ROOT
;
806 dw_pcie_writel_dbi(pci
, ptm_cap_base
+ PCI_PTM_CAP
, reg
);
808 reg
= dw_pcie_readl_dbi(pci
, ptm_cap_base
+ PCI_PTM_CAP
);
809 reg
&= ~(PCI_PTM_CAP_RES
| PCI_PTM_GRANULARITY_MASK
);
810 dw_pcie_writel_dbi(pci
, ptm_cap_base
+ PCI_PTM_CAP
, reg
);
811 dw_pcie_dbi_ro_wr_dis(pci
);
814 dw_pcie_ep_init_non_sticky_registers(pci
);
819 dw_pcie_edma_remove(pci
);
823 EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers
);
826 * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
829 void dw_pcie_ep_linkup(struct dw_pcie_ep
*ep
)
831 struct pci_epc
*epc
= ep
->epc
;
835 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup
);
838 * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
841 * Non-sticky registers are also initialized before sending the notification to
842 * the EPF drivers. This is needed since the registers need to be initialized
843 * before the link comes back again.
845 void dw_pcie_ep_linkdown(struct dw_pcie_ep
*ep
)
847 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
848 struct pci_epc
*epc
= ep
->epc
;
851 * Initialize the non-sticky DWC registers as they would've reset post
852 * Link Down. This is specifically needed for drivers not supporting
853 * PERST# as they have no way to reinitialize the registers before the
854 * link comes back again.
856 dw_pcie_ep_init_non_sticky_registers(pci
);
858 pci_epc_linkdown(epc
);
860 EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown
);
863 * dw_pcie_ep_init - Initialize the endpoint device
866 * Initialize the endpoint device. Allocate resources and create the EPC
867 * device with the endpoint framework.
869 * Return: 0 if success, errno otherwise.
871 int dw_pcie_ep_init(struct dw_pcie_ep
*ep
)
874 struct resource
*res
;
876 struct dw_pcie
*pci
= to_dw_pcie_from_ep(ep
);
877 struct device
*dev
= pci
->dev
;
878 struct platform_device
*pdev
= to_platform_device(dev
);
879 struct device_node
*np
= dev
->of_node
;
881 INIT_LIST_HEAD(&ep
->func_list
);
883 ret
= dw_pcie_get_resources(pci
);
887 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "addr_space");
891 ep
->phys_base
= res
->start
;
892 ep
->addr_size
= resource_size(res
);
894 if (ep
->ops
->pre_init
)
895 ep
->ops
->pre_init(ep
);
897 epc
= devm_pci_epc_create(dev
, &epc_ops
);
899 dev_err(dev
, "Failed to create epc device\n");
904 epc_set_drvdata(epc
, ep
);
906 ret
= of_property_read_u8(np
, "max-functions", &epc
->max_functions
);
908 epc
->max_functions
= 1;
910 ret
= pci_epc_mem_init(epc
, ep
->phys_base
, ep
->addr_size
,
913 dev_err(dev
, "Failed to initialize address space\n");
917 ep
->msi_mem
= pci_epc_mem_alloc_addr(epc
, &ep
->msi_mem_phys
,
918 epc
->mem
->window
.page_size
);
921 dev_err(dev
, "Failed to reserve memory for MSI/MSI-X\n");
922 goto err_exit_epc_mem
;
928 pci_epc_mem_exit(epc
);
932 EXPORT_SYMBOL_GPL(dw_pcie_ep_init
);