// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */
#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;
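/*
 * get_zdev_by_fid() - look up a zpci_dev by its function ID.
 *
 * Walks zpci_list under zpci_list_lock and returns the matching device,
 * or NULL if the FID is unknown. Note that no reference is taken on the
 * returned device.
 */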
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}
static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
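/*
 * A note on the Modify PCI Function Controls helpers below: a zpci_fib
 * (function information block) is filled in and handed to zpci_mod_fc()
 * together with a request word encoding the function handle and an
 * operation code. For IOAT registration, pba/pal delimit the usable DMA
 * address range and iota carries the translation-table origin;
 * ZPCI_IOTA_RTTO_FLAG marks it as a region-third-table origin, which is
 * why the low bits of iota must be clear (see the WARN_ON_ONCE).
 */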
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}
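/*
 * A PCI function measurement block (FMB) is a 16-byte-aligned buffer
 * that firmware updates with per-function counters once its address has
 * been passed in fib.fmb_addr. Disabling measurement works by submitting
 * a FIB with a zero fmb_addr, as zpci_fmb_disable_device() does below.
 */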
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
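/*
 * Config space accessors: on s390, config space is reached through the
 * PCI load/store instructions (__zpci_load/__zpci_store) rather than
 * through memory-mapped accesses. The value travels left-aligned in a
 * 64-bit doubleword, hence the byte swap and the shift by (8 - len) * 8.
 */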
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
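/*
 * Without MIO, the "addresses" handed out for PCI resources are just
 * cookies decoded by the PCI load/store instructions, so ioremap() can
 * return them unchanged. With MIO, BARs have real addresses that must
 * be mapped into the kernel address space via the vmalloc area.
 */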
void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
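/*
 * The write-combining variants below differ from pci_iomap_range() only
 * in MIO mode, where they map the BAR's write-back MIO address (mio_wb)
 * instead of the write-through one (mio_wt), as the names suggest; in
 * fh mode both fall back to pci_iomap_range_fh().
 */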
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);
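/*
 * Root-bus config ops: each zPCI function is presented alone on its own
 * root bus, so only devfn ZPCI_DEVFN is valid here and any other devfn
 * is answered with -ENODEV.
 */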
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif
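/*
 * zpci_map_resources() fills in resource addresses that the common PCI
 * core never assigned: the MIO write-back address in MIO mode, or an
 * iomap cookie from pci_iomap_range_fh() otherwise. SR-IOV BARs are
 * only parented under the dummy iov_res placeholder above.
 */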
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wb;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int bar = i + PCI_IOV_RESOURCES;

		len = pci_resource_len(pdev, bar);
		if (!len)
			continue;
		pdev->resource[bar].parent = &iov_res;
	}
#endif
}
static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}
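/*
 * CLP describes each BAR by a power-of-two size exponent plus flag bits
 * in the usual PCI BAR encoding (bit 3 prefetchable, bit 2 64-bit);
 * zpci_setup_bus_resources() translates that into resource flags and a
 * size of 1UL << bars[i].size.
 */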
static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wb;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}
static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}
int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}
void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq = zpci_restore,
	.freeze_noirq = zpci_freeze,
	.restore_noirq = zpci_restore,
	.poweroff_noirq = zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */
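/*
 * Domain numbers: when the platform guarantees unique UIDs
 * (zpci_unique_uid), the UID is used as the domain directly, and values
 * beyond ZPCI_NR_DEVICES are accepted without bitmap tracking (which is
 * also why zpci_free_domain() ignores them). Otherwise the first free
 * bit in zpci_domain is allocated.
 */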
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
				zdev->fid, zdev->domain);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
			zdev->fid, ZPCI_NR_DEVICES);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}
static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
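/*
 * Command line handling ("pci=..." on s390): "off" disables PCI probing
 * altogether, "nomio" sticks to the legacy PCI instructions even when
 * MIO is available, and "force_floating" forces floating interrupts
 * instead of directed ones.
 */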
static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
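/*
 * pci_base_init() checks the base PCI facility bits before touching
 * anything; facility 153 indicates MIO instruction support, and
 * ctl_set_bit(2, 5) turns those instructions on via bit 5 of control
 * register 2. Initialization order is debug, memory pools, interrupts,
 * DMA, then a CLP scan for functions, with a matching unwind chain on
 * failure.
 */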
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);

void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple(NULL);
}