#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

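/*
 * Example (illustrative sketch only; the my_* names are hypothetical and
 * not part of this API): an IOMMU driver provides its TLB maintenance
 * hooks as a constant table and passes its domain structure in as the
 * cookie, which is handed back unmodified to each callback.
 *
 *	static void my_tlb_sync(void *cookie)
 *	{
 *		struct my_smmu_domain *dom = cookie;
 *
 *		my_hw_wait_for_tlbi(dom);
 *	}
 *
 *	static const struct iommu_gather_ops my_gather_ops = {
 *		.tlb_flush_all	= my_tlb_flush_all,
 *		.tlb_add_flush	= my_tlb_add_flush,
 *		.tlb_sync	= my_tlb_sync,
 *	};
 */
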
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

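/*
 * Example (illustrative sketch only): once an IOMMU driver holds an ops
 * pointer, its own iommu_ops callbacks typically just forward to it. The
 * iova, paddr, ret, unmapped and phys values below are placeholders.
 *
 *	ret = ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *
 *	unmapped = ops->unmap(ops, iova, SZ_4K);
 *	phys = ops->iova_to_phys(ops, iova);
 */
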
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

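/*
 * Example (illustrative sketch only; dev, my_domain and the my_gather_ops
 * table from the sketch above are hypothetical): a driver fills in a
 * struct io_pgtable_cfg describing what its hardware supports and lets the
 * allocator restrict it.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_gather_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, my_domain);
 *	if (!ops)
 *		return -ENOMEM;
 */
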
/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);

/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
	iop->tlb_sync_pending = true;
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (iop->tlb_sync_pending) {
		iop->cfg.tlb->tlb_sync(iop->cookie);
		iop->tlb_sync_pending = false;
	}
}

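/*
 * Example (illustrative sketch only): a page table allocator implementation
 * calls these helpers after modifying the tables, e.g. on its unmap path,
 * so that redundant syncs are skipped via tlb_sync_pending.
 *
 *	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 *	io_pgtable_tlb_sync(iop);
 */
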
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

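/*
 * Example (illustrative sketch only; the my_fmt_* names are hypothetical):
 * each page table format exports one of these tables, which the core
 * allocator selects by enum io_pgtable_fmt in alloc_io_pgtable_ops().
 *
 *	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *		.alloc	= my_fmt_alloc_pgtable,
 *		.free	= my_fmt_free_pgtable,
 *	};
 */
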
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

#endif /* __IO_PGTABLE_H */