// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"
static const struct iommu_ops s390_iommu_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;
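
/*
 * A paging domain: the CPU-side copy of the I/O translation tables plus the
 * RCU-protected list of attached zPCI devices and per-domain usage counters.
 */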
struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	struct zpci_iommu_ctrs	ctrs;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
};

static struct iommu_domain blocking_domain;
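
/*
 * Helpers for the three-level I/O address translation: a region table
 * indexes segment tables, which index page tables, which hold the page
 * frame absolute addresses (PFAAs) seen by the device.
 */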
static inline unsigned int calc_rtx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}
static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
	*entry &= ZPCI_PTE_FLAG_MASK;
	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
	*entry &= ZPCI_STE_FLAG_MASK;
	*entry |= (pto & ZPCI_STE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_SX;
}
static inline void validate_rt_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry |= ZPCI_TABLE_VALID;
}

static inline void invalidate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_UNPROTECTED;
}
static inline int reg_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
	else
		return NULL;
}
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN, 0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN, 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}
static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}
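
/*
 * Get the segment table referenced by a region table entry, allocating and
 * installing it with cmpxchg() if the entry is still invalid, so concurrent
 * mappers can populate the tables without taking a lock.
 */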
static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}
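
/*
 * Walk (and if necessary build) the translation tables for dma_addr and
 * return a pointer to its page table entry, or NULL if a table allocation
 * failed.
 */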
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}
static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}
static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return zdev->pft != PCI_FUNC_TYPE_ISM;
	default:
		return false;
	}
}
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct s390_domain *s390_domain;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = ZPCI_TABLE_SIZE_RT - 1;

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}
static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain->dma_table);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}
static void zdev_s390_domain_update(struct zpci_dev *zdev,
				    struct iommu_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->dom_lock, flags);
	zdev->s390_domain = domain;
	spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
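
/*
 * Attaching the blocking domain unregisters the device's I/O address
 * translation (IOAT) and removes it from its current paging domain,
 * leaving all DMA blocked.
 */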
static int blocking_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	unsigned long flags;

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	s390_domain = to_s390_domain(zdev->s390_domain);
	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_del_rcu(&zdev->iommu_list);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	zpci_unregister_ioat(zdev, 0);
	zdev->dma_table = NULL;
	zdev_s390_domain_update(zdev, domain);

	return 0;
}
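
/*
 * Attach switches the device to the blocking domain first, then registers
 * the new domain's translation table with zpci_register_ioat() and adds
 * the device to the domain's RCU-protected device list.
 */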
static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		    domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				virt_to_phys(s390_domain->dma_table), &status);
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;
	zdev->dma_table = s390_domain->dma_table;
	zdev_s390_domain_update(zdev, domain);

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}
static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	if (zdev->end_dma < ZPCI_TABLE_SIZE_RT - 1) {
		region = iommu_alloc_resv_region(zdev->end_dma + 1,
						 ZPCI_TABLE_SIZE_RT - zdev->end_dma - 1,
						 0, IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}
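
/*
 * Probe clamps the device's usable DMA range to the aperture covered by the
 * region table and starts the device off in the blocking domain.
 */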
static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma ||
	    zdev->start_dma > ZPCI_TABLE_SIZE_RT - 1)
		return ERR_PTR(-EINVAL);

	if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
		zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;

	if (zdev->tlb_refresh)
		dev->iommu->shadow_on_flush = 1;

	/* Start with DMA blocked */
	spin_lock_init(&zdev->dom_lock);
	zdev_s390_domain_update(zdev, &blocking_domain);

	return &zdev->iommu_dev;
}
static int zpci_refresh_all(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		zpci_refresh_all(zdev);
	}
	rcu_read_unlock();
}
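
/*
 * Flush the range collected in the IOTLB gather on every device attached to
 * the domain.
 */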
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If gather was never added to there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}
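
/*
 * After mapping, only devices that require a TLB refresh (those with
 * shadow_on_flush set) need an RPCIT; fall back to refreshing the whole
 * aperture if the targeted refresh runs out of resources.
 */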
static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
		ret = zpci_refresh_trans((u64)zdev->fh << 32,
					 iova, size);
		/*
		 * let the hypervisor discover invalidated entries
		 * allowing it to free IOVAs and unpin pages
		 */
		if (ret == -ENOMEM) {
			ret = zpci_refresh_all(zdev);
			if (ret)
				break;
		}
	}
	rcu_read_unlock();

	return ret;
}
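
/*
 * Map nr_pages starting at dma_addr to the physical pages at pa. On failure
 * the partially created mapping is invalidated again before returning.
 */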
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
					   gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain->dma_table,
					   dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}
static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
					   GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}
static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}
static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);
	rto = s390_domain->dma_table;

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}
static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
		    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}
struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	lockdep_assert_held(&zdev->dom_lock);

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}
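
/*
 * Register the zPCI device with the IOMMU core and shrink its DMA range to
 * the configured aperture.
 */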
int zpci_init_iommu(struct zpci_dev *zdev)
{
	u64 aperture_size;
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
	if (rc)
		goto out_sysfs;

	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	aperture_size = min3(s390_iommu_aperture,
			     ZPCI_TABLE_SIZE_RT - zdev->start_dma,
			     zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + aperture_size - 1;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}
void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict")) {
		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
		iommu_set_dma_strict();
	}
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;

	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
static int __init s390_iommu_init(void)
{
	int rc;

	iommu_dma_forcedac = true;
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	rc = dma_alloc_cpu_table_caches();
	if (rc)
		return rc;

	return rc;
}
subsys_initcall(s390_iommu_init);
static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev	= blocking_domain_attach_device,
	}
};

static const struct iommu_ops s390_iommu_ops = {
	.blocked_domain		= &blocking_domain,
	.release_domain		= &blocking_domain,
	.capable		= s390_iommu_capable,
	.domain_alloc_paging	= s390_domain_alloc_paging,
	.probe_device		= s390_iommu_probe_device,
	.device_group		= generic_device_group,
	.pgsize_bitmap		= SZ_4K,
	.get_resv_regions	= s390_iommu_get_resv_regions,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= s390_iommu_attach_device,
		.map_pages	= s390_iommu_map_pages,
		.unmap_pages	= s390_iommu_unmap_pages,
		.flush_iotlb_all = s390_iommu_flush_iotlb_all,
		.iotlb_sync	= s390_iommu_iotlb_sync,
		.iotlb_sync_map	= s390_iommu_iotlb_sync_map,
		.iova_to_phys	= s390_iommu_iova_to_phys,
		.free		= s390_domain_free,
	}
};