// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}
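
/*
 * The "own" accessors above touch the Root Complex's own config space,
 * which DesignWare exposes through the DBI register window; platform
 * drivers can override them when the DBI space needs special access
 * sequencing.
 */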
static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	u32 val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val, 32,
					    pos)) != 32) {
			irq = irq_find_mapping(pp->irq_domain, i * 32 + pos);
			generic_handle_irq(irq);
			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12,
					    4, 1 << pos);
			pos++;
		}
	}

	return ret;
}
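
/*
 * Each MSI controller block covers 32 vectors, and its enable, mask
 * and status registers sit in a 12-byte block, hence the "i * 12"
 * stride above. The status bits are write-one-to-clear, so each
 * serviced vector is acked individually after its handler runs.
 */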
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}
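
/*
 * The page mapped above is never actually read or written: its bus
 * address only serves as the MSI target address, so endpoint writes
 * to it are caught inside the Root Complex and raise an MSI interrupt
 * instead of reaching memory.
 */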
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
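
/*
 * In dw_pcie_msi_clear_irq()/dw_pcie_msi_set_irq(), "(irq / 32) * 12"
 * selects the 12-byte register block of the controller owning the
 * vector, and "irq % 32" the bit within its 32-bit enable register.
 */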
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp;

	pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}
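
/*
 * Multi-MSI blocks must be a naturally aligned power of two, which is
 * why assign_irq() sizes the bitmap region with order_base_2(): the MSI
 * capability's Multiple Message Enable field only encodes log2 of the
 * vector count.
 */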
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}
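
/*
 * By default the message data is simply the vector's bitmap position,
 * so the bit index that dw_handle_msi_irq() finds in
 * PCIE_MSI_INTR0_STATUS maps straight back to the allocated vector.
 */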
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};
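
/*
 * dw_pcie_host_init() below picks up the shared config space from a
 * platform resource named "config" and the number of iATU viewports
 * from the optional "num-viewport" DT property (defaulting to 2).
 * A minimal, purely illustrative device-tree fragment (addresses made
 * up; real bindings also need the DBI registers, ranges, etc.):
 *
 *	pcie@1000000 {
 *		reg = <0x01000000 0x100000>;
 *		reg-names = "config";
 *		num-viewport = <4>;
 *		...
 *	};
 */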
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int i, ret;
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) / 2;
		pp->cfg1_size = resource_size(cfg_res) / 2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
					       &bridge->windows, &pp->io_base);
	if (ret)
		goto error;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) / 2;
			pp->cfg1_size = resource_size(pp->cfg) / 2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
						pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bridge->msi = &dw_pcie_msi_chip;
		dw_pcie_msi_chip.dev = dev;
	}

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
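
/*
 * Config accesses to devices below the root bus go through an ATU
 * viewport: CFG0 cycles target the bus immediately below the Root
 * Port, CFG1 cycles anything further downstream, which is why the
 * "config" space (and its mapping) is split in half above.
 */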
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_read(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
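
/*
 * With only two viewports, ATU index 1 is shared between config and
 * I/O translation, so it is handed back to I/O after every config
 * access; controllers with more viewports keep a dedicated index for
 * I/O (see dw_pcie_setup_rc()) and can skip the reprogramming.
 */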
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_write(va_cfg_base + where, size, val);
	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}
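
/*
 * A PCIe link is point-to-point, so only device 0 can sit directly
 * below the Root Port; filtering out other slot numbers here avoids
 * config cycles that the hardware may not filter itself and that
 * could alias onto device 0.
 */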
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
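
/*
 * Newer DesignWare cores replace the indexed PCIE_ATU_VIEWPORT
 * interface with directly memory-mapped ("unrolled") iATU registers;
 * on those cores the viewport register reads back as all ones, which
 * is what the probe above detects.
 */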
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	/* setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
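
/*
 * Setting PORT_LOGIC_SPEED_CHANGE at the end asks the LTSSM to
 * initiate a directed speed change, so the link can train up from
 * Gen1 to the highest rate both partners support.
 */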