// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

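/* Look up a zpci device in the global device list by its function ID (FID). */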
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

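/*
 * Drop functions that went from standby to reserved behind our back:
 * they are moved off the global list under the lock and released
 * afterwards, outside of it.
 */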
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_zdev_put(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

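/*
 * Note on the WARN_ON_ONCE() below: the I/O translation anchor (iota)
 * must be 16K-aligned since its low bits are reserved for format flags
 * such as ZPCI_IOTA_RTTO_FLAG, which is OR-ed in before the FIB is
 * passed to the modify-PCI instruction.
 */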
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

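/*
 * Config space is accessed through the PCI load/store instructions as a
 * little-endian doubleword; the shifts in the two accessors below
 * re-align values for accesses shorter than 8 bytes.
 */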
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

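/*
 * Without MIO support, an "I/O address" is just a cookie interpreted by
 * the special zpci load/store instructions and can be handed back
 * unchanged; with MIO, BAR space is real address space and needs a
 * genuine page-table mapping.
 */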
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

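/*
 * The non-MIO iomap variants fabricate a cookie from the device's
 * iomap-table index via ZPCI_ADDR(); the per-entry use count detects
 * unbalanced map/unmap calls.
 */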
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

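/* pci_ops for the root bus: route config cycles to the zpci accessors */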
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

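/*
 * Fill in the resource addresses for each BAR: the write-through MIO
 * address when MIO is in use, otherwise a freshly created mapping cookie.
 */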
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

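/* Reserve a free slot in the global BAR mapping table. */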
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev,
			     struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

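/*
 * PCI domain handling: if the platform provides unique UIDs, the UID is
 * registered as the domain number directly; otherwise domain numbers are
 * auto-allocated from the zpci_domain bitmap.
 */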
static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * The zPCI function may already be disabled by the platform, this is
	 * detected in clp_disable_fh() which becomes a no-op.
	 */
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

void zpci_remove_device(struct zpci_dev *zdev)
{
	struct zpci_bus *zbus = zdev->zbus;
	struct pci_dev *pdev;

	pdev = pci_get_slot(zbus->bus, zdev->devfn);
	if (pdev) {
		if (pdev->is_virtfn)
			return zpci_iov_remove_virtfn(pdev, zdev->vfn);
		pci_stop_and_remove_bus_device_locked(pdev);
	}
}

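/*
 * Add a freshly created zdev to the global list, set up its IOMMU, enable
 * it if already configured and register it with the zPCI bus code; on any
 * failure the steps are unwound in reverse order.
 */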
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	kref_init(&zdev->kref);

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto out_disable;

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);

out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out:
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	return rc;
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	if (zdev->zbus->bus)
		zpci_remove_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_ONLINE:
	case ZPCI_FN_STATE_CONFIGURED:
		zpci_disable_device(zdev);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

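/*
 * The BUILD_BUG_ON() below guarantees that an FMB allocated from the cache
 * never crosses a page boundary: the cache alignment is a power of two and
 * at least as large as the block itself.
 */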
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

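/* Parse the s390-specific "pci=" command-line options. */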
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);