// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>
#include <asm/iommu-common.h>
15 static unsigned long iommu_large_alloc
= 15;
17 static DEFINE_PER_CPU(unsigned int, iommu_hash_common
);
19 static inline bool need_flush(struct iommu_map_table
*iommu
)
21 return ((iommu
->flags
& IOMMU_NEED_FLUSH
) != 0);
24 static inline void set_flush(struct iommu_map_table
*iommu
)
26 iommu
->flags
|= IOMMU_NEED_FLUSH
;
29 static inline void clear_flush(struct iommu_map_table
*iommu
)
31 iommu
->flags
&= ~IOMMU_NEED_FLUSH
;
34 static void setup_iommu_pool_hash(void)
42 for_each_possible_cpu(i
)
43 per_cpu(iommu_hash_common
, i
) = hash_32(i
, IOMMU_POOL_HASHBITS
);
47 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
48 * is the number of table entries. If `large_pool' is set to true,
49 * the top 1/4 of the table will be set aside for pool allocations
50 * of more than iommu_large_alloc pages.
52 void iommu_tbl_pool_init(struct iommu_map_table
*iommu
,
53 unsigned long num_entries
,
55 void (*lazy_flush
)(struct iommu_map_table
*),
56 bool large_pool
, u32 npools
,
57 bool skip_span_boundary_check
)
59 unsigned int start
, i
;
60 struct iommu_pool
*p
= &(iommu
->large_pool
);
62 setup_iommu_pool_hash();
64 iommu
->nr_pools
= IOMMU_NR_POOLS
;
66 iommu
->nr_pools
= npools
;
67 BUG_ON(npools
> IOMMU_NR_POOLS
);
69 iommu
->table_shift
= table_shift
;
70 iommu
->lazy_flush
= lazy_flush
;
72 if (skip_span_boundary_check
)
73 iommu
->flags
|= IOMMU_NO_SPAN_BOUND
;
75 iommu
->flags
|= IOMMU_HAS_LARGE_POOL
;
78 iommu
->poolsize
= num_entries
/iommu
->nr_pools
;
80 iommu
->poolsize
= (num_entries
* 3 / 4)/iommu
->nr_pools
;
81 for (i
= 0; i
< iommu
->nr_pools
; i
++) {
82 spin_lock_init(&(iommu
->pools
[i
].lock
));
83 iommu
->pools
[i
].start
= start
;
84 iommu
->pools
[i
].hint
= start
;
85 start
+= iommu
->poolsize
; /* start for next pool */
86 iommu
->pools
[i
].end
= start
- 1;
90 /* initialize large_pool */
91 spin_lock_init(&(p
->lock
));
97 unsigned long iommu_tbl_range_alloc(struct device
*dev
,
98 struct iommu_map_table
*iommu
,
100 unsigned long *handle
,
102 unsigned int align_order
)
104 unsigned int pool_hash
= __this_cpu_read(iommu_hash_common
);
105 unsigned long n
, end
, start
, limit
, boundary_size
;
106 struct iommu_pool
*pool
;
108 unsigned int pool_nr
;
109 unsigned int npools
= iommu
->nr_pools
;
111 bool large_pool
= ((iommu
->flags
& IOMMU_HAS_LARGE_POOL
) != 0);
112 bool largealloc
= (large_pool
&& npages
> iommu_large_alloc
);
114 unsigned long align_mask
= 0;
117 align_mask
= ~0ul >> (BITS_PER_LONG
- align_order
);
120 if (unlikely(npages
== 0)) {
122 return IOMMU_ERROR_CODE
;
126 pool
= &(iommu
->large_pool
);
127 pool_nr
= 0; /* to keep compiler happy */
129 /* pick out pool_nr */
130 pool_nr
= pool_hash
& (npools
- 1);
131 pool
= &(iommu
->pools
[pool_nr
]);
133 spin_lock_irqsave(&pool
->lock
, flags
);
136 if (pass
== 0 && handle
&& *handle
&&
137 (*handle
>= pool
->start
) && (*handle
< pool
->end
))
144 /* The case below can happen if we have a small segment appended
145 * to a large, or when the previous alloc was at the very end of
146 * the available space. If so, go back to the beginning. If a
147 * flush is needed, it will get done based on the return value
148 * from iommu_area_alloc() below.
152 shift
= iommu
->table_map_base
>> iommu
->table_shift
;
153 if (limit
+ shift
> mask
) {
154 limit
= mask
- shift
+ 1;
155 /* If we're constrained on address range, first try
156 * at the masked hint to avoid O(n) search complexity,
157 * but on second pass, start at 0 in pool 0.
159 if ((start
& mask
) >= limit
|| pass
> 0) {
160 spin_unlock(&(pool
->lock
));
161 pool
= &(iommu
->pools
[0]);
162 spin_lock(&(pool
->lock
));
170 * if the skip_span_boundary_check had been set during init, we set
171 * things up so that iommu_is_span_boundary() merely checks if the
172 * (index + npages) < num_tsb_entries
174 if ((iommu
->flags
& IOMMU_NO_SPAN_BOUND
) != 0) {
176 boundary_size
= iommu
->poolsize
* iommu
->nr_pools
;
178 boundary_size
= dma_get_seg_boundary_nr_pages(dev
,
181 n
= iommu_area_alloc(iommu
->map
, limit
, start
, npages
, shift
,
182 boundary_size
, align_mask
);
184 if (likely(pass
== 0)) {
185 /* First failure, rescan from the beginning. */
186 pool
->hint
= pool
->start
;
190 } else if (!largealloc
&& pass
<= iommu
->nr_pools
) {
191 spin_unlock(&(pool
->lock
));
192 pool_nr
= (pool_nr
+ 1) & (iommu
->nr_pools
- 1);
193 pool
= &(iommu
->pools
[pool_nr
]);
194 spin_lock(&(pool
->lock
));
195 pool
->hint
= pool
->start
;
201 n
= IOMMU_ERROR_CODE
;
205 if (iommu
->lazy_flush
&&
206 (n
< pool
->hint
|| need_flush(iommu
))) {
208 iommu
->lazy_flush(iommu
);
214 /* Update handle for SG allocations */
218 spin_unlock_irqrestore(&(pool
->lock
), flags
);
223 static struct iommu_pool
*get_pool(struct iommu_map_table
*tbl
,
226 struct iommu_pool
*p
;
227 unsigned long largepool_start
= tbl
->large_pool
.start
;
228 bool large_pool
= ((tbl
->flags
& IOMMU_HAS_LARGE_POOL
) != 0);
230 /* The large pool is the last pool at the top of the table */
231 if (large_pool
&& entry
>= largepool_start
) {
232 p
= &tbl
->large_pool
;
234 unsigned int pool_nr
= entry
/ tbl
->poolsize
;
236 BUG_ON(pool_nr
>= tbl
->nr_pools
);
237 p
= &tbl
->pools
[pool_nr
];
242 /* Caller supplies the index of the entry into the iommu map table
243 * itself when the mapping from dma_addr to the entry is not the
244 * default addr->entry mapping below.
246 void iommu_tbl_range_free(struct iommu_map_table
*iommu
, u64 dma_addr
,
247 unsigned long npages
, unsigned long entry
)
249 struct iommu_pool
*pool
;
251 unsigned long shift
= iommu
->table_shift
;
253 if (entry
== IOMMU_ERROR_CODE
) /* use default addr->entry mapping */
254 entry
= (dma_addr
- iommu
->table_map_base
) >> shift
;
255 pool
= get_pool(iommu
, entry
);
257 spin_lock_irqsave(&(pool
->lock
), flags
);
258 bitmap_clear(iommu
->map
, entry
, npages
);
259 spin_unlock_irqrestore(&(pool
->lock
), flags
);