/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define PCIE20_PARF_SYS_CTRL			0x00
#define PCIE20_PARF_PHY_CTRL			0x40
#define PCIE20_PARF_PHY_REFCLK			0x4C
#define PCIE20_PARF_DBI_BASE_ADDR		0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
#define PCIE20_PARF_LTSSM			0x1B0
#define PCIE20_PARF_SID_OFFSET			0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C

#define PCIE20_ELBI_SYS_CTRL			0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)

#define PCIE20_CAP				0x70

#define PERST_DELAY_US				1000

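/*
 * Each supported SoC generation needs a different set of clocks, resets and
 * regulators; one of the per-variant resource structs below is selected via
 * the qcom_pcie_ops matched from the compatible string at probe time.
 */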
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};

struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};

struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};

struct qcom_pcie {
	struct pcie_port pp;			/* pp.dbi_base is DT dbi */
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct qcom_pcie_ops *ops;
};

#define to_qcom_pcie(x)		container_of(x, struct qcom_pcie, pp)

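/* Assert/deassert the endpoint PERST# GPIO, allowing PERST_DELAY_US for the state to settle. */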
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}

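/*
 * Link training is kicked off by setting an LTSSM enable bit: the v0/v1
 * controllers expose it in the ELBI block, the v2 controller in PARF.
 */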
static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}

static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);		/* LTSSM_EN */
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}

static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	if (dw_pcie_link_up(&pcie->pp))
		return 0;

	/* Enable Link Training state machine */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(&pcie->pp);
}

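/*
 * The get_resources callbacks only look up clocks, resets and regulators;
 * nothing is enabled here. Everything is switched on later from ->init().
 */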
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	return 0;
}

static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	if (IS_ERR(res->core))
		return PTR_ERR(res->core);

	return 0;
}

static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;

	reset_control_assert(res->pci_reset);
	reset_control_assert(res->axi_reset);
	reset_control_assert(res->ahb_reset);
	reset_control_assert(res->por_reset);
	reset_control_assert(res->pci_reset);
	clk_disable_unprepare(res->iface_clk);
	clk_disable_unprepare(res->core_clk);
	clk_disable_unprepare(res->phy_clk);
	regulator_disable(res->vdda);
	regulator_disable(res->vdda_phy);
	regulator_disable(res->vdda_refclk);
}

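/*
 * v0 bring-up: enable the supplies, turn on the iface/phy/core clocks while
 * the AHB reset is held, then release the PHY, PCI, POR and AXI resets.
 */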
static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct device *dev = pcie->pp.dev;
	u32 val;
	int ret;

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		return ret;
	}

	ret = regulator_enable(res->vdda_refclk);
	if (ret) {
		dev_err(dev, "cannot enable vdda_refclk regulator\n");
		goto err_refclk;
	}

	ret = regulator_enable(res->vdda_phy);
	if (ret) {
		dev_err(dev, "cannot enable vdda_phy regulator\n");
		goto err_vdda_phy;
	}

	ret = reset_control_assert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot assert ahb reset\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->iface_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_assert_ahb;
	}

	ret = clk_prepare_enable(res->phy_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable phy clock\n");
		goto err_clk_phy;
	}

	ret = clk_prepare_enable(res->core_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable core clock\n");
		goto err_clk_core;
	}

	ret = reset_control_deassert(res->ahb_reset);
	if (ret) {
		dev_err(dev, "cannot deassert ahb reset\n");
		goto err_deassert_ahb;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* enable external reference clock */
	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
	val |= BIT(16);
	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);

	ret = reset_control_deassert(res->phy_reset);
	if (ret) {
		dev_err(dev, "cannot deassert phy reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->pci_reset);
	if (ret) {
		dev_err(dev, "cannot deassert pci reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->por_reset);
	if (ret) {
		dev_err(dev, "cannot deassert por reset\n");
		return ret;
	}

	ret = reset_control_deassert(res->axi_reset);
	if (ret) {
		dev_err(dev, "cannot deassert axi reset\n");
		return ret;
	}

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	return 0;

err_deassert_ahb:
	clk_disable_unprepare(res->core_clk);
err_clk_core:
	clk_disable_unprepare(res->phy_clk);
err_clk_phy:
	clk_disable_unprepare(res->iface_clk);
err_assert_ahb:
	regulator_disable(res->vdda_phy);
err_vdda_phy:
	regulator_disable(res->vdda_refclk);
err_refclk:
	regulator_disable(res->vdda);

	return ret;
}

static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct device *dev = pcie->pp.dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;

err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}

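/*
 * The v2 (MSM8996) variant manages no regulators or resets of its own here;
 * only the aux, cfg, bus and PHY pipe clocks are handled by this glue.
 */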
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	if (IS_ERR(res->pipe_clk))
		return PTR_ERR(res->pipe_clk);

	return 0;
}

static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}

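/*
 * The pipe clock is enabled separately in ->post_init(), i.e. only after
 * phy_power_on() has run in qcom_pcie_host_init(), since it is not usable
 * before the PHY is up.
 */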
static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct device *dev = pcie->pp.dev;
	int ret;

	ret = clk_prepare_enable(res->pipe_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable pipe clock\n");
		return ret;
	}

	return 0;
}

static int qcom_pcie_link_up(struct pcie_port *pp)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pp);
	u16 val = readw(pcie->pp.dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

	clk_disable_unprepare(res->pipe_clk);
	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}

static void qcom_pcie_host_init(struct pcie_port *pp)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pp);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->ops->init(pcie);
	if (ret)
		return;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->ops->post_init)
		pcie->ops->post_init(pcie);

	dw_pcie_setup_rc(pp);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	qcom_ep_reset_deassert(pcie);

	ret = qcom_pcie_establish_link(pcie);
	if (ret)
		goto err;

	return;

err:
	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
err_deinit:
	pcie->ops->deinit(pcie);
}

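/*
 * The hardware reports a wrong device class for the root port, so reads of
 * PCI_CLASS_REVISION on our own config space are patched up to advertise a
 * PCI-to-PCI bridge while preserving the revision id.
 */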
static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pp->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
}

static struct pcie_host_ops qcom_pcie_dw_ops = {
	.link_up = qcom_pcie_link_up,
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};

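/*
 * The right ops variant is picked up in probe via of_device_get_match_data(),
 * based on the compatible strings listed in qcom_pcie_match[] below.
 */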
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct qcom_pcie *pcie;
	struct pcie_port *pp;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pp = &pcie->pp;
	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
	if (IS_ERR(pcie->reset))
		return PTR_ERR(pcie->reset);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
	pcie->parf = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->parf))
		return PTR_ERR(pcie->parf);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pp->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pp->dbi_base))
		return PTR_ERR(pp->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
	pcie->elbi = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->elbi))
		return PTR_ERR(pcie->elbi);

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy))
		return PTR_ERR(pcie->phy);

	pp->dev = dev;
	ret = pcie->ops->get_resources(pcie);
	if (ret)
		return ret;

	pp->root_bus_nr = -1;
	pp->ops = &qcom_pcie_dw_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;

		ret = devm_request_irq(dev, pp->msi_irq,
				       qcom_pcie_msi_irq_handler,
				       IRQF_SHARED, "qcom-pcie-msi", pp);
		if (ret) {
			dev_err(dev, "cannot request msi irq\n");
			return ret;
		}
	}

	ret = phy_init(pcie->phy);
	if (ret)
		return ret;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		return ret;
	}

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ }
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);