// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!).
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_remove_device(zdev);
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
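
/*
 * The "Modify PCI Function Controls" helpers below talk to the machine
 * through a function information block (FIB). Registering an I/O address
 * translation (IOAT) table gives the function its DMA address space; the
 * iota value carries the root table address plus format flags.
 */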
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}
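
/*
 * A function measurement block (FMB) is a 16-byte aligned area that the
 * machine updates with load/store/DMA counters once its address has been
 * set via a SET_MEASURE request; setting a zero address (a cleared FIB)
 * switches measurement off again.
 */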
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}
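
/*
 * Config space is accessed through the PCI load/store instructions. The
 * shift and byte swap below move the len-byte value between the
 * instruction's register format and CPU byte order.
 */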
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
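
/*
 * With MIO support, BAR values are special addresses that must be mapped
 * like ordinary MMIO. Without MIO they are merely cookies interpreted by
 * the PCI load/store wrappers, so ioremap() can hand them back unchanged.
 */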
void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;

	if (!size)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) ioaddr;

	offset = ioaddr & ~PAGE_MASK;
	ioaddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	if (ioremap_page_range((unsigned long) area->addr,
			       (unsigned long) area->addr + size,
			       ioaddr, PAGE_KERNEL)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
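
/*
 * In the non-MIO case the returned "address" is not a mapping but an
 * encoded iomap table index (ZPCI_ADDR()/ZPCI_IDX() convert between the
 * two); the table entry keeps the function handle and BAR that the PCI
 * load/store wrappers need on every access.
 */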
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset,
					    unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	/* The MIO mapping was created with ioremap(), so undo it here. */
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);
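
/*
 * pci_ops for the root bus: each zpci function sits alone on its own
 * root bus, so only its single devfn (ZPCI_DEVFN) is valid.
 */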
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_load(zdev, where, val, size);

	return ret;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);
	int ret;

	if (!zdev || devfn != ZPCI_DEVFN)
		ret = -ENODEV;
	else
		ret = zpci_cfg_store(zdev, where, val, size);

	return ret;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
	.name	= "PCI IOV res",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
#endif
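
/*
 * Assign the BAR "addresses" (MIO addresses or iomap cookies) to the
 * pci_dev's resources; SR-IOV BARs are parented to the dummy iov_res
 * instead of claiming real address space.
 */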
static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wb;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

#ifdef CONFIG_PCI_IOV
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		int bar = i + PCI_IOV_RESOURCES;

		len = pci_resource_len(pdev, bar);
		if (!len)
			continue;
		pdev->resource[bar].parent = &iov_res;
	}
#endif
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

static int zpci_setup_bus_resources(struct zpci_dev *zdev,
				    struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wb;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct resource *res;
	int i;

	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	zpci_unmap_resources(pdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}
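
/*
 * Hibernation support: a function cannot stay enabled across a
 * suspend/resume cycle, so freeze tears down the IOAT registration and
 * BAR mappings while restore re-enables the function and re-registers
 * its DMA translation table.
 */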
#ifdef CONFIG_HIBERNATE_CALLBACKS
static int zpci_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);
	int ret = 0;

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		goto out;

	ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (ret)
		goto out;

	zpci_map_resources(pdev);
	zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
			   (u64) zdev->dma_table);

out:
	return ret;
}

static int zpci_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct zpci_dev *zdev = to_zpci(pdev);

	if (zdev->state != ZPCI_FN_STATE_ONLINE)
		return 0;

	zpci_unregister_ioat(zdev, 0);
	zpci_unmap_resources(pdev);
	return clp_disable_fh(zdev);
}

struct dev_pm_ops pcibios_pm_ops = {
	.thaw_noirq	= zpci_restore,
	.freeze_noirq	= zpci_freeze,
	.restore_noirq	= zpci_restore,
	.poweroff_noirq	= zpci_freeze,
};
#endif /* CONFIG_HIBERNATE_CALLBACKS */
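
/*
 * The PCI domain number mirrors the function's UID when the machine
 * guarantees unique UIDs; only domains below ZPCI_NR_DEVICES are tracked
 * in the bitmap (zpci_free_domain() skips the rest). Without unique UIDs
 * the domain comes from the bitmap allocator.
 */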
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	if (zpci_unique_uid) {
		zdev->domain = (u16) zdev->uid;
		if (zdev->domain >= ZPCI_NR_DEVICES)
			return 0;

		spin_lock(&zpci_domain_lock);
		if (test_bit(zdev->domain, zpci_domain)) {
			spin_unlock(&zpci_domain_lock);
			pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
				zdev->fid, zdev->domain);
			return -EEXIST;
		}
		set_bit(zdev->domain, zpci_domain);
		spin_unlock(&zpci_domain_lock);
		return 0;
	}

	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
			zdev->fid, ZPCI_NR_DEVICES);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	if (zdev->domain >= ZPCI_NR_DEVICES)
		return;

	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	zpci_exit_slot(zdev);
	zpci_cleanup_bus_resources(zdev);
	zpci_destroy_iommu(zdev);
	zpci_free_domain(zdev);

	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);

	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

static int zpci_scan_bus(struct zpci_dev *zdev)
{
	LIST_HEAD(resources);
	int ret;

	ret = zpci_setup_bus_resources(zdev, &resources);
	if (ret)
		goto error;

	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus) {
		ret = -EIO;
		goto error;
	}
	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	pci_bus_add_devices(zdev->bus);
	return 0;

error:
	zpci_cleanup_bus_resources(zdev);
	pci_free_resource_list(&resources);
	return ret;
}
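
/* Enable the function via CLP and set up its DMA translation tables. */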
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto out_free;

	mutex_init(&zdev->lock);
	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_destroy_iommu;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);

	return 0;

out_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_destroy_iommu:
	zpci_destroy_iommu(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_remove_device(struct zpci_dev *zdev)
{
	if (!zdev->bus)
		return;

	pci_stop_root_bus(zdev->bus);
	pci_remove_root_bus(zdev->bus);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		s390_pci_no_mio = 1;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}
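
/*
 * Base PCI support is only activated when the required PCI facilities
 * (69 and 71) are installed; facility 153 additionally enables the MIO
 * variants of the PCI instructions unless "pci=nomio" was given.
 */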
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (test_facility(153) && !s390_pci_no_mio) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);

void zpci_rescan(void)
{
	if (zpci_is_enabled())
		clp_rescan_pci_devices_simple(NULL);
}