1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
7 #ifndef __LINUX_IOMMU_H
8 #define __LINUX_IOMMU_H
10 #include <linux/scatterlist.h>
11 #include <linux/device.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
16 #include <linux/ioasid.h>
17 #include <uapi/linux/iommu.h>
19 #define IOMMU_READ (1 << 0)
20 #define IOMMU_WRITE (1 << 1)
21 #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
22 #define IOMMU_NOEXEC (1 << 3)
23 #define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
25 * Where the bus hardware includes a privilege level as part of its access type
26 * markings, and certain devices are capable of issuing transactions marked as
27 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
28 * given permission flags only apply to accesses at the higher privilege level,
29 * and that unprivileged transactions should have as little access as possible.
30 * This would usually imply the same permissions as kernel mappings on the CPU,
31 * if the IOMMU page table format is equivalent.
33 #define IOMMU_PRIV (1 << 5)
35 * Non-coherent masters can use this page protection flag to set cacheable
36 * memory attributes for only a transparent outer level of cache, also known as
37 * the last-level or system cache.
39 #define IOMMU_SYS_CACHE_ONLY (1 << 6)
46 struct notifier_block
;
48 struct iommu_fault_event
;
50 /* iommu fault flags */
51 #define IOMMU_FAULT_READ 0x0
52 #define IOMMU_FAULT_WRITE 0x1
54 typedef int (*iommu_fault_handler_t
)(struct iommu_domain
*,
55 struct device
*, unsigned long, int, void *);
56 typedef int (*iommu_mm_exit_handler_t
)(struct device
*dev
, struct iommu_sva
*,
58 typedef int (*iommu_dev_fault_handler_t
)(struct iommu_fault
*, void *);
60 struct iommu_domain_geometry
{
61 dma_addr_t aperture_start
; /* First address that can be mapped */
62 dma_addr_t aperture_end
; /* Last address that can be mapped */
63 bool force_aperture
; /* DMA only allowed in mappable range? */
66 /* Domain feature flags */
67 #define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
68 #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
70 #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
73 * These are the possible domain-types
75 * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
77 * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
78 * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
80 * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
81 * This flag allows IOMMU drivers to implement
82 * certain optimizations for these domains
84 #define IOMMU_DOMAIN_BLOCKED (0U)
85 #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
86 #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
87 #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
88 __IOMMU_DOMAIN_DMA_API)
92 const struct iommu_ops
*ops
;
93 unsigned long pgsize_bitmap
; /* Bitmap of page sizes in use */
94 iommu_fault_handler_t handler
;
96 struct iommu_domain_geometry geometry
;
101 IOMMU_CAP_CACHE_COHERENCY
, /* IOMMU can enforce cache coherent DMA
103 IOMMU_CAP_INTR_REMAP
, /* IOMMU supports interrupt isolation */
104 IOMMU_CAP_NOEXEC
, /* IOMMU_NOEXEC flag */
108 * Following constraints are specific to FSL_PAMUV1:
109 * -aperture must be power of 2, and naturally aligned
110 * -number of windows must be power of 2, and address space size
111 * of each window is determined by aperture size / # of windows
112 * -the actual size of the mapped region of a window must be power
113 * of 2 starting with 4KB and physical address must be naturally
115 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
116 * The caller can invoke iommu_domain_get_attr to check if the underlying
117 * iommu implementation supports these constraints.
121 DOMAIN_ATTR_GEOMETRY
,
124 DOMAIN_ATTR_FSL_PAMU_STASH
,
125 DOMAIN_ATTR_FSL_PAMU_ENABLE
,
126 DOMAIN_ATTR_FSL_PAMUV1
,
127 DOMAIN_ATTR_NESTING
, /* two stages of translation */
128 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
,
132 /* These are the possible reserved region types */
133 enum iommu_resv_type
{
134 /* Memory regions which must be mapped 1:1 at all times */
137 * Memory regions which are advertised to be 1:1 but are
138 * commonly considered relaxable in some conditions,
139 * for instance in device assignment use case (USB, Graphics)
141 IOMMU_RESV_DIRECT_RELAXABLE
,
142 /* Arbitrary "never map this or give it to a device" address ranges */
144 /* Hardware MSI region (untranslated) */
146 /* Software-managed MSI translation window */
151 * struct iommu_resv_region - descriptor for a reserved memory region
152 * @list: Linked list pointers
153 * @start: System physical start address of the region
154 * @length: Length of the region in bytes
155 * @prot: IOMMU Protection flags (READ/WRITE/...)
156 * @type: Type of the reserved region
158 struct iommu_resv_region
{
159 struct list_head list
;
163 enum iommu_resv_type type
;
166 /* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};
172 #define IOMMU_PASID_INVALID (-1U)
175 * struct iommu_sva_ops - device driver callbacks for an SVA context
177 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
178 * @mm_exit returns, the device must not issue any more transaction
179 * with the PASID given as argument.
181 * The @mm_exit handler is allowed to sleep. Be careful about the
182 * locks taken in @mm_exit, because they might lead to deadlocks if
183 * they are also held when dropping references to the mm. Consider the
184 * following call chain:
185 * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
186 * Using mmput_async() prevents this scenario.
189 struct iommu_sva_ops
{
190 iommu_mm_exit_handler_t mm_exit
;
193 #ifdef CONFIG_IOMMU_API
196 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
198 * @start: IOVA representing the start of the range to be flushed
199 * @end: IOVA representing the end of the range to be flushed (exclusive)
200 * @pgsize: The interval at which to perform the flush
202 * This structure is intended to be updated by multiple calls to the
203 * ->unmap() function in struct iommu_ops before eventually being passed
204 * into ->iotlb_sync().
206 struct iommu_iotlb_gather
{
213 * struct iommu_ops - iommu ops and capabilities
214 * @capable: check capability
215 * @domain_alloc: allocate iommu domain
216 * @domain_free: free iommu domain
217 * @attach_dev: attach device to an iommu domain
218 * @detach_dev: detach device from an iommu domain
219 * @map: map a physically contiguous memory region to an iommu domain
220 * @unmap: unmap a physically contiguous memory region from an iommu domain
221 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
222 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
223 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
225 * @iova_to_phys: translate iova to physical address
226 * @add_device: add device to iommu grouping
227 * @remove_device: remove device from iommu grouping
228 * @device_group: find iommu group for a particular device
229 * @domain_get_attr: Query domain attributes
230 * @domain_set_attr: Change domain attributes
231 * @get_resv_regions: Request list of reserved regions for a device
232 * @put_resv_regions: Free list of reserved regions for a device
233 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
234 * @domain_window_enable: Configure and enable a particular window for a domain
235 * @domain_window_disable: Disable a particular window for a domain
236 * @of_xlate: add OF master IDs to iommu grouping
237 * @is_attach_deferred: Check if domain attach should be deferred from iommu
238 * driver init to device driver init (default no)
239 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
240 * iommu specific features.
241 * @dev_feat_enabled: check enabled feature
242 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
243 * @aux_get_pasid: get the pasid given an aux-domain
244 * @sva_bind: Bind process address space to device
245 * @sva_unbind: Unbind process address space from device
246 * @sva_get_pasid: Get PASID associated to a SVA handle
247 * @page_response: handle page request response
248 * @cache_invalidate: invalidate translation caches
249 * @sva_bind_gpasid: bind guest pasid and mm
250 * @sva_unbind_gpasid: unbind guest pasid and mm
251 * @pgsize_bitmap: bitmap of all possible supported page sizes
252 * @owner: Driver module providing these ops
255 bool (*capable
)(enum iommu_cap
);
257 /* Domain allocation and freeing by the iommu driver */
258 struct iommu_domain
*(*domain_alloc
)(unsigned iommu_domain_type
);
259 void (*domain_free
)(struct iommu_domain
*);
261 int (*attach_dev
)(struct iommu_domain
*domain
, struct device
*dev
);
262 void (*detach_dev
)(struct iommu_domain
*domain
, struct device
*dev
);
263 int (*map
)(struct iommu_domain
*domain
, unsigned long iova
,
264 phys_addr_t paddr
, size_t size
, int prot
, gfp_t gfp
);
265 size_t (*unmap
)(struct iommu_domain
*domain
, unsigned long iova
,
266 size_t size
, struct iommu_iotlb_gather
*iotlb_gather
);
267 void (*flush_iotlb_all
)(struct iommu_domain
*domain
);
268 void (*iotlb_sync_map
)(struct iommu_domain
*domain
);
269 void (*iotlb_sync
)(struct iommu_domain
*domain
,
270 struct iommu_iotlb_gather
*iotlb_gather
);
271 phys_addr_t (*iova_to_phys
)(struct iommu_domain
*domain
, dma_addr_t iova
);
272 int (*add_device
)(struct device
*dev
);
273 void (*remove_device
)(struct device
*dev
);
274 struct iommu_group
*(*device_group
)(struct device
*dev
);
275 int (*domain_get_attr
)(struct iommu_domain
*domain
,
276 enum iommu_attr attr
, void *data
);
277 int (*domain_set_attr
)(struct iommu_domain
*domain
,
278 enum iommu_attr attr
, void *data
);
280 /* Request/Free a list of reserved regions for a device */
281 void (*get_resv_regions
)(struct device
*dev
, struct list_head
*list
);
282 void (*put_resv_regions
)(struct device
*dev
, struct list_head
*list
);
283 void (*apply_resv_region
)(struct device
*dev
,
284 struct iommu_domain
*domain
,
285 struct iommu_resv_region
*region
);
287 /* Window handling functions */
288 int (*domain_window_enable
)(struct iommu_domain
*domain
, u32 wnd_nr
,
289 phys_addr_t paddr
, u64 size
, int prot
);
290 void (*domain_window_disable
)(struct iommu_domain
*domain
, u32 wnd_nr
);
292 int (*of_xlate
)(struct device
*dev
, struct of_phandle_args
*args
);
293 bool (*is_attach_deferred
)(struct iommu_domain
*domain
, struct device
*dev
);
295 /* Per device IOMMU features */
296 bool (*dev_has_feat
)(struct device
*dev
, enum iommu_dev_features f
);
297 bool (*dev_feat_enabled
)(struct device
*dev
, enum iommu_dev_features f
);
298 int (*dev_enable_feat
)(struct device
*dev
, enum iommu_dev_features f
);
299 int (*dev_disable_feat
)(struct device
*dev
, enum iommu_dev_features f
);
301 /* Aux-domain specific attach/detach entries */
302 int (*aux_attach_dev
)(struct iommu_domain
*domain
, struct device
*dev
);
303 void (*aux_detach_dev
)(struct iommu_domain
*domain
, struct device
*dev
);
304 int (*aux_get_pasid
)(struct iommu_domain
*domain
, struct device
*dev
);
306 struct iommu_sva
*(*sva_bind
)(struct device
*dev
, struct mm_struct
*mm
,
308 void (*sva_unbind
)(struct iommu_sva
*handle
);
309 int (*sva_get_pasid
)(struct iommu_sva
*handle
);
311 int (*page_response
)(struct device
*dev
,
312 struct iommu_fault_event
*evt
,
313 struct iommu_page_response
*msg
);
314 int (*cache_invalidate
)(struct iommu_domain
*domain
, struct device
*dev
,
315 struct iommu_cache_invalidate_info
*inv_info
);
316 int (*sva_bind_gpasid
)(struct iommu_domain
*domain
,
317 struct device
*dev
, struct iommu_gpasid_bind_data
*data
);
319 int (*sva_unbind_gpasid
)(struct device
*dev
, int pasid
);
321 unsigned long pgsize_bitmap
;
322 struct module
*owner
;
326 * struct iommu_device - IOMMU core representation of one IOMMU hardware
328 * @list: Used by the iommu-core to keep a list of registered iommus
329 * @ops: iommu-ops for talking to this iommu
330 * @dev: struct device for sysfs handling
332 struct iommu_device
{
333 struct list_head list
;
334 const struct iommu_ops
*ops
;
335 struct fwnode_handle
*fwnode
;
340 * struct iommu_fault_event - Generic fault event
342 * Can represent recoverable faults such as a page requests or
343 * unrecoverable faults such as DMA or IRQ remapping faults.
345 * @fault: fault descriptor
346 * @list: pending fault event list, used for tracking responses
348 struct iommu_fault_event
{
349 struct iommu_fault fault
;
350 struct list_head list
;
354 * struct iommu_fault_param - per-device IOMMU fault data
355 * @handler: Callback function to handle IOMMU faults at device level
356 * @data: handler private data
357 * @faults: holds the pending faults which needs response
358 * @lock: protect pending faults list
360 struct iommu_fault_param
{
361 iommu_dev_fault_handler_t handler
;
363 struct list_head faults
;
368 * struct iommu_param - collection of per-device IOMMU data
370 * @fault_param: IOMMU detected device fault reporting data
372 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
373 * struct iommu_group *iommu_group;
374 * struct iommu_fwspec *iommu_fwspec;
378 struct iommu_fault_param
*fault_param
;
381 int iommu_device_register(struct iommu_device
*iommu
);
382 void iommu_device_unregister(struct iommu_device
*iommu
);
383 int iommu_device_sysfs_add(struct iommu_device
*iommu
,
384 struct device
*parent
,
385 const struct attribute_group
**groups
,
386 const char *fmt
, ...) __printf(4, 5);
387 void iommu_device_sysfs_remove(struct iommu_device
*iommu
);
388 int iommu_device_link(struct iommu_device
*iommu
, struct device
*link
);
389 void iommu_device_unlink(struct iommu_device
*iommu
, struct device
*link
);
391 static inline void __iommu_device_set_ops(struct iommu_device
*iommu
,
392 const struct iommu_ops
*ops
)
397 #define iommu_device_set_ops(iommu, ops) \
399 struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
400 __ops->owner = THIS_MODULE; \
401 __iommu_device_set_ops(iommu, __ops); \
404 static inline void iommu_device_set_fwnode(struct iommu_device
*iommu
,
405 struct fwnode_handle
*fwnode
)
407 iommu
->fwnode
= fwnode
;
/*
 * Retrieve the iommu_device stashed in the sysfs device's drvdata by the
 * IOMMU core; only valid for devices registered via iommu_device_sysfs_add().
 */
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}
415 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather
*gather
)
417 *gather
= (struct iommu_iotlb_gather
) {
422 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
423 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
424 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
425 #define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
426 #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
427 #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
429 extern int bus_set_iommu(struct bus_type
*bus
, const struct iommu_ops
*ops
);
430 extern bool iommu_present(struct bus_type
*bus
);
431 extern bool iommu_capable(struct bus_type
*bus
, enum iommu_cap cap
);
432 extern struct iommu_domain
*iommu_domain_alloc(struct bus_type
*bus
);
433 extern struct iommu_group
*iommu_group_get_by_id(int id
);
434 extern void iommu_domain_free(struct iommu_domain
*domain
);
435 extern int iommu_attach_device(struct iommu_domain
*domain
,
437 extern void iommu_detach_device(struct iommu_domain
*domain
,
439 extern int iommu_cache_invalidate(struct iommu_domain
*domain
,
441 struct iommu_cache_invalidate_info
*inv_info
);
442 extern int iommu_sva_bind_gpasid(struct iommu_domain
*domain
,
443 struct device
*dev
, struct iommu_gpasid_bind_data
*data
);
444 extern int iommu_sva_unbind_gpasid(struct iommu_domain
*domain
,
445 struct device
*dev
, ioasid_t pasid
);
446 extern struct iommu_domain
*iommu_get_domain_for_dev(struct device
*dev
);
447 extern struct iommu_domain
*iommu_get_dma_domain(struct device
*dev
);
448 extern int iommu_map(struct iommu_domain
*domain
, unsigned long iova
,
449 phys_addr_t paddr
, size_t size
, int prot
);
450 extern int iommu_map_atomic(struct iommu_domain
*domain
, unsigned long iova
,
451 phys_addr_t paddr
, size_t size
, int prot
);
452 extern size_t iommu_unmap(struct iommu_domain
*domain
, unsigned long iova
,
454 extern size_t iommu_unmap_fast(struct iommu_domain
*domain
,
455 unsigned long iova
, size_t size
,
456 struct iommu_iotlb_gather
*iotlb_gather
);
457 extern size_t iommu_map_sg(struct iommu_domain
*domain
, unsigned long iova
,
458 struct scatterlist
*sg
,unsigned int nents
, int prot
);
459 extern size_t iommu_map_sg_atomic(struct iommu_domain
*domain
,
460 unsigned long iova
, struct scatterlist
*sg
,
461 unsigned int nents
, int prot
);
462 extern phys_addr_t
iommu_iova_to_phys(struct iommu_domain
*domain
, dma_addr_t iova
);
463 extern void iommu_set_fault_handler(struct iommu_domain
*domain
,
464 iommu_fault_handler_t handler
, void *token
);
466 extern void iommu_get_resv_regions(struct device
*dev
, struct list_head
*list
);
467 extern void iommu_put_resv_regions(struct device
*dev
, struct list_head
*list
);
468 extern void generic_iommu_put_resv_regions(struct device
*dev
,
469 struct list_head
*list
);
470 extern int iommu_request_dm_for_dev(struct device
*dev
);
471 extern int iommu_request_dma_domain_for_dev(struct device
*dev
);
472 extern void iommu_set_default_passthrough(bool cmd_line
);
473 extern void iommu_set_default_translated(bool cmd_line
);
474 extern bool iommu_default_passthrough(void);
475 extern struct iommu_resv_region
*
476 iommu_alloc_resv_region(phys_addr_t start
, size_t length
, int prot
,
477 enum iommu_resv_type type
);
478 extern int iommu_get_group_resv_regions(struct iommu_group
*group
,
479 struct list_head
*head
);
481 extern int iommu_attach_group(struct iommu_domain
*domain
,
482 struct iommu_group
*group
);
483 extern void iommu_detach_group(struct iommu_domain
*domain
,
484 struct iommu_group
*group
);
485 extern struct iommu_group
*iommu_group_alloc(void);
486 extern void *iommu_group_get_iommudata(struct iommu_group
*group
);
487 extern void iommu_group_set_iommudata(struct iommu_group
*group
,
489 void (*release
)(void *iommu_data
));
490 extern int iommu_group_set_name(struct iommu_group
*group
, const char *name
);
491 extern int iommu_group_add_device(struct iommu_group
*group
,
493 extern void iommu_group_remove_device(struct device
*dev
);
494 extern int iommu_group_for_each_dev(struct iommu_group
*group
, void *data
,
495 int (*fn
)(struct device
*, void *));
496 extern struct iommu_group
*iommu_group_get(struct device
*dev
);
497 extern struct iommu_group
*iommu_group_ref_get(struct iommu_group
*group
);
498 extern void iommu_group_put(struct iommu_group
*group
);
499 extern int iommu_group_register_notifier(struct iommu_group
*group
,
500 struct notifier_block
*nb
);
501 extern int iommu_group_unregister_notifier(struct iommu_group
*group
,
502 struct notifier_block
*nb
);
503 extern int iommu_register_device_fault_handler(struct device
*dev
,
504 iommu_dev_fault_handler_t handler
,
507 extern int iommu_unregister_device_fault_handler(struct device
*dev
);
509 extern int iommu_report_device_fault(struct device
*dev
,
510 struct iommu_fault_event
*evt
);
511 extern int iommu_page_response(struct device
*dev
,
512 struct iommu_page_response
*msg
);
514 extern int iommu_group_id(struct iommu_group
*group
);
515 extern struct iommu_group
*iommu_group_get_for_dev(struct device
*dev
);
516 extern struct iommu_domain
*iommu_group_default_domain(struct iommu_group
*);
518 extern int iommu_domain_get_attr(struct iommu_domain
*domain
, enum iommu_attr
,
520 extern int iommu_domain_set_attr(struct iommu_domain
*domain
, enum iommu_attr
,
523 /* Window handling function prototypes */
524 extern int iommu_domain_window_enable(struct iommu_domain
*domain
, u32 wnd_nr
,
525 phys_addr_t offset
, u64 size
,
527 extern void iommu_domain_window_disable(struct iommu_domain
*domain
, u32 wnd_nr
);
529 extern int report_iommu_fault(struct iommu_domain
*domain
, struct device
*dev
,
530 unsigned long iova
, int flags
);
532 static inline void iommu_flush_tlb_all(struct iommu_domain
*domain
)
534 if (domain
->ops
->flush_iotlb_all
)
535 domain
->ops
->flush_iotlb_all(domain
);
538 static inline void iommu_tlb_sync(struct iommu_domain
*domain
,
539 struct iommu_iotlb_gather
*iotlb_gather
)
541 if (domain
->ops
->iotlb_sync
)
542 domain
->ops
->iotlb_sync(domain
, iotlb_gather
);
544 iommu_iotlb_gather_init(iotlb_gather
);
547 static inline void iommu_iotlb_gather_add_page(struct iommu_domain
*domain
,
548 struct iommu_iotlb_gather
*gather
,
549 unsigned long iova
, size_t size
)
551 unsigned long start
= iova
, end
= start
+ size
;
554 * If the new page is disjoint from the current range or is mapped at
555 * a different granularity, then sync the TLB so that the gather
556 * structure can be rewritten.
558 if (gather
->pgsize
!= size
||
559 end
< gather
->start
|| start
> gather
->end
) {
561 iommu_tlb_sync(domain
, gather
);
562 gather
->pgsize
= size
;
565 if (gather
->end
< end
)
568 if (gather
->start
> start
)
569 gather
->start
= start
;
572 /* PCI device grouping function */
573 extern struct iommu_group
*pci_device_group(struct device
*dev
);
574 /* Generic device grouping function */
575 extern struct iommu_group
*generic_device_group(struct device
*dev
);
576 /* FSL-MC device grouping function */
577 struct iommu_group
*fsl_mc_device_group(struct device
*dev
);
580 * struct iommu_fwspec - per-device IOMMU instance data
581 * @ops: ops for this device's IOMMU
582 * @iommu_fwnode: firmware handle for this device's IOMMU
583 * @iommu_priv: IOMMU driver private data for this device
584 * @num_pasid_bits: number of PASID bits supported by this device
585 * @num_ids: number of associated device IDs
586 * @ids: IDs which this device may present to the IOMMU
588 struct iommu_fwspec
{
589 const struct iommu_ops
*ops
;
590 struct fwnode_handle
*iommu_fwnode
;
594 unsigned int num_ids
;
598 /* ATS is supported */
599 #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
602 * struct iommu_sva - handle to a device-mm bond
606 const struct iommu_sva_ops
*ops
;
609 int iommu_fwspec_init(struct device
*dev
, struct fwnode_handle
*iommu_fwnode
,
610 const struct iommu_ops
*ops
);
611 void iommu_fwspec_free(struct device
*dev
);
612 int iommu_fwspec_add_ids(struct device
*dev
, u32
*ids
, int num_ids
);
613 const struct iommu_ops
*iommu_ops_from_fwnode(struct fwnode_handle
*fwnode
);
615 static inline struct iommu_fwspec
*dev_iommu_fwspec_get(struct device
*dev
)
617 return dev
->iommu_fwspec
;
620 static inline void dev_iommu_fwspec_set(struct device
*dev
,
621 struct iommu_fwspec
*fwspec
)
623 dev
->iommu_fwspec
= fwspec
;
626 int iommu_probe_device(struct device
*dev
);
627 void iommu_release_device(struct device
*dev
);
629 bool iommu_dev_has_feature(struct device
*dev
, enum iommu_dev_features f
);
630 int iommu_dev_enable_feature(struct device
*dev
, enum iommu_dev_features f
);
631 int iommu_dev_disable_feature(struct device
*dev
, enum iommu_dev_features f
);
632 bool iommu_dev_feature_enabled(struct device
*dev
, enum iommu_dev_features f
);
633 int iommu_aux_attach_device(struct iommu_domain
*domain
, struct device
*dev
);
634 void iommu_aux_detach_device(struct iommu_domain
*domain
, struct device
*dev
);
635 int iommu_aux_get_pasid(struct iommu_domain
*domain
, struct device
*dev
);
637 struct iommu_sva
*iommu_sva_bind_device(struct device
*dev
,
638 struct mm_struct
*mm
,
640 void iommu_sva_unbind_device(struct iommu_sva
*handle
);
641 int iommu_sva_set_ops(struct iommu_sva
*handle
,
642 const struct iommu_sva_ops
*ops
);
643 int iommu_sva_get_pasid(struct iommu_sva
*handle
);
645 #else /* CONFIG_IOMMU_API */
648 struct iommu_group
{};
649 struct iommu_fwspec
{};
650 struct iommu_device
{};
651 struct iommu_fault_param
{};
652 struct iommu_iotlb_gather
{};
654 static inline bool iommu_present(struct bus_type
*bus
)
659 static inline bool iommu_capable(struct bus_type
*bus
, enum iommu_cap cap
)
664 static inline struct iommu_domain
*iommu_domain_alloc(struct bus_type
*bus
)
669 static inline struct iommu_group
*iommu_group_get_by_id(int id
)
674 static inline void iommu_domain_free(struct iommu_domain
*domain
)
678 static inline int iommu_attach_device(struct iommu_domain
*domain
,
684 static inline void iommu_detach_device(struct iommu_domain
*domain
,
689 static inline struct iommu_domain
*iommu_get_domain_for_dev(struct device
*dev
)
694 static inline int iommu_map(struct iommu_domain
*domain
, unsigned long iova
,
695 phys_addr_t paddr
, size_t size
, int prot
)
700 static inline int iommu_map_atomic(struct iommu_domain
*domain
,
701 unsigned long iova
, phys_addr_t paddr
,
702 size_t size
, int prot
)
707 static inline size_t iommu_unmap(struct iommu_domain
*domain
,
708 unsigned long iova
, size_t size
)
713 static inline size_t iommu_unmap_fast(struct iommu_domain
*domain
,
714 unsigned long iova
, int gfp_order
,
715 struct iommu_iotlb_gather
*iotlb_gather
)
720 static inline size_t iommu_map_sg(struct iommu_domain
*domain
,
721 unsigned long iova
, struct scatterlist
*sg
,
722 unsigned int nents
, int prot
)
727 static inline size_t iommu_map_sg_atomic(struct iommu_domain
*domain
,
728 unsigned long iova
, struct scatterlist
*sg
,
729 unsigned int nents
, int prot
)
734 static inline void iommu_flush_tlb_all(struct iommu_domain
*domain
)
738 static inline void iommu_tlb_sync(struct iommu_domain
*domain
,
739 struct iommu_iotlb_gather
*iotlb_gather
)
743 static inline int iommu_domain_window_enable(struct iommu_domain
*domain
,
744 u32 wnd_nr
, phys_addr_t paddr
,
750 static inline void iommu_domain_window_disable(struct iommu_domain
*domain
,
755 static inline phys_addr_t
iommu_iova_to_phys(struct iommu_domain
*domain
, dma_addr_t iova
)
760 static inline void iommu_set_fault_handler(struct iommu_domain
*domain
,
761 iommu_fault_handler_t handler
, void *token
)
765 static inline void iommu_get_resv_regions(struct device
*dev
,
766 struct list_head
*list
)
770 static inline void iommu_put_resv_regions(struct device
*dev
,
771 struct list_head
*list
)
775 static inline int iommu_get_group_resv_regions(struct iommu_group
*group
,
776 struct list_head
*head
)
781 static inline int iommu_request_dm_for_dev(struct device
*dev
)
786 static inline int iommu_request_dma_domain_for_dev(struct device
*dev
)
791 static inline void iommu_set_default_passthrough(bool cmd_line
)
795 static inline void iommu_set_default_translated(bool cmd_line
)
799 static inline bool iommu_default_passthrough(void)
804 static inline int iommu_attach_group(struct iommu_domain
*domain
,
805 struct iommu_group
*group
)
810 static inline void iommu_detach_group(struct iommu_domain
*domain
,
811 struct iommu_group
*group
)
815 static inline struct iommu_group
*iommu_group_alloc(void)
817 return ERR_PTR(-ENODEV
);
820 static inline void *iommu_group_get_iommudata(struct iommu_group
*group
)
825 static inline void iommu_group_set_iommudata(struct iommu_group
*group
,
827 void (*release
)(void *iommu_data
))
831 static inline int iommu_group_set_name(struct iommu_group
*group
,
837 static inline int iommu_group_add_device(struct iommu_group
*group
,
843 static inline void iommu_group_remove_device(struct device
*dev
)
847 static inline int iommu_group_for_each_dev(struct iommu_group
*group
,
849 int (*fn
)(struct device
*, void *))
854 static inline struct iommu_group
*iommu_group_get(struct device
*dev
)
859 static inline void iommu_group_put(struct iommu_group
*group
)
863 static inline int iommu_group_register_notifier(struct iommu_group
*group
,
864 struct notifier_block
*nb
)
869 static inline int iommu_group_unregister_notifier(struct iommu_group
*group
,
870 struct notifier_block
*nb
)
876 int iommu_register_device_fault_handler(struct device
*dev
,
877 iommu_dev_fault_handler_t handler
,
883 static inline int iommu_unregister_device_fault_handler(struct device
*dev
)
889 int iommu_report_device_fault(struct device
*dev
, struct iommu_fault_event
*evt
)
894 static inline int iommu_page_response(struct device
*dev
,
895 struct iommu_page_response
*msg
)
900 static inline int iommu_group_id(struct iommu_group
*group
)
905 static inline int iommu_domain_get_attr(struct iommu_domain
*domain
,
906 enum iommu_attr attr
, void *data
)
911 static inline int iommu_domain_set_attr(struct iommu_domain
*domain
,
912 enum iommu_attr attr
, void *data
)
917 static inline int iommu_device_register(struct iommu_device
*iommu
)
922 static inline void iommu_device_set_ops(struct iommu_device
*iommu
,
923 const struct iommu_ops
*ops
)
927 static inline void iommu_device_set_fwnode(struct iommu_device
*iommu
,
928 struct fwnode_handle
*fwnode
)
932 static inline struct iommu_device
*dev_to_iommu_device(struct device
*dev
)
937 static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather
*gather
)
941 static inline void iommu_iotlb_gather_add_page(struct iommu_domain
*domain
,
942 struct iommu_iotlb_gather
*gather
,
943 unsigned long iova
, size_t size
)
947 static inline void iommu_device_unregister(struct iommu_device
*iommu
)
951 static inline int iommu_device_sysfs_add(struct iommu_device
*iommu
,
952 struct device
*parent
,
953 const struct attribute_group
**groups
,
954 const char *fmt
, ...)
959 static inline void iommu_device_sysfs_remove(struct iommu_device
*iommu
)
963 static inline int iommu_device_link(struct device
*dev
, struct device
*link
)
968 static inline void iommu_device_unlink(struct device
*dev
, struct device
*link
)
972 static inline int iommu_fwspec_init(struct device
*dev
,
973 struct fwnode_handle
*iommu_fwnode
,
974 const struct iommu_ops
*ops
)
979 static inline void iommu_fwspec_free(struct device
*dev
)
983 static inline int iommu_fwspec_add_ids(struct device
*dev
, u32
*ids
,
990 const struct iommu_ops
*iommu_ops_from_fwnode(struct fwnode_handle
*fwnode
)
996 iommu_dev_has_feature(struct device
*dev
, enum iommu_dev_features feat
)
1002 iommu_dev_feature_enabled(struct device
*dev
, enum iommu_dev_features feat
)
1008 iommu_dev_enable_feature(struct device
*dev
, enum iommu_dev_features feat
)
1014 iommu_dev_disable_feature(struct device
*dev
, enum iommu_dev_features feat
)
1020 iommu_aux_attach_device(struct iommu_domain
*domain
, struct device
*dev
)
1026 iommu_aux_detach_device(struct iommu_domain
*domain
, struct device
*dev
)
1031 iommu_aux_get_pasid(struct iommu_domain
*domain
, struct device
*dev
)
1036 static inline struct iommu_sva
*
1037 iommu_sva_bind_device(struct device
*dev
, struct mm_struct
*mm
, void *drvdata
)
1042 static inline void iommu_sva_unbind_device(struct iommu_sva
*handle
)
1046 static inline int iommu_sva_set_ops(struct iommu_sva
*handle
,
1047 const struct iommu_sva_ops
*ops
)
1052 static inline int iommu_sva_get_pasid(struct iommu_sva
*handle
)
1054 return IOMMU_PASID_INVALID
;
1058 iommu_cache_invalidate(struct iommu_domain
*domain
,
1060 struct iommu_cache_invalidate_info
*inv_info
)
1064 static inline int iommu_sva_bind_gpasid(struct iommu_domain
*domain
,
1065 struct device
*dev
, struct iommu_gpasid_bind_data
*data
)
1070 static inline int iommu_sva_unbind_gpasid(struct iommu_domain
*domain
,
1071 struct device
*dev
, int pasid
)
1076 #endif /* CONFIG_IOMMU_API */
1078 #ifdef CONFIG_IOMMU_DEBUGFS
1079 extern struct dentry
*iommu_debugfs_dir
;
1080 void iommu_debugfs_setup(void);
1082 static inline void iommu_debugfs_setup(void) {}
1085 #endif /* __LINUX_IOMMU_H */