/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"
void
init_iova_domain(struct iova_domain *iovad)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
}
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	/* Start the backwards walk from the cached 32-bit node when possible */
	if ((*limit_pfn != DMA_32BIT_PFN) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	/* Only cache allocations made with the 32-bit limit */
	if (limit_pfn != DMA_32BIT_PFN)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	/* If the cached node is being freed, advance the cache to its successor */
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
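/*
 * Worked example (illustration only, not from the original file): with
 * size = 8 page frames and limit_pfn = 0xffffd, order = ilog2(8) = 3 and
 * pad_size = (0xffffd + 1) % (1 << 3) = 6.  __alloc_iova_range() then
 * places the range at pfn_lo = limit_pfn - (size + pad_size) + 1 = 0xffff0,
 * which is naturally aligned on the 8-frame size, leaving the 6 padding
 * frames unused directly below limit_pfn.
 */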
static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);
		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}
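/*
 * Note on the search above (added commentary, not in the original file):
 * the allocator walks the rbtree from the highest allocated range downward
 * and looks for a gap of at least size + pad_size frames between the top of
 * an existing range (curr_iova->pfn_hi) and the current limit_pfn.  If the
 * walk falls off the left edge of the tree, the space above IOVA_START_PFN
 * is checked instead.  New ranges are therefore handed out from the top of
 * the usable address space down.
 */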
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN,
 * searching down from limit_pfn rather than up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
			size_aligned);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}
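/*
 * Usage sketch (hedged, not part of the original file): a DMA-mapping path
 * might allocate "nrpages" frames below the 32-bit boundary and derive the
 * bus address from pfn_lo.  "nrpages", "iovad" and the page-shift used are
 * placeholder assumptions for illustration only.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(iovad, nrpages, DMA_32BIT_PFN, 1);
 *	if (!iova)
 *		return NULL;	// address space exhausted
 *	start_addr = (u64)iova->pfn_lo << PAGE_SHIFT;
 */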
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
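/*
 * Usage sketch (hedged, not part of the original file): an unmap path that
 * only knows the starting bus address can recover the full allocated range
 * before tearing down page-table entries.  "dma_addr" and the page shift
 * are placeholders.
 *
 *	struct iova *iova = find_iova(iovad, dma_addr >> PAGE_SHIFT);
 *	if (iova)
 *		npages = iova->pfn_hi - iova->pfn_lo + 1;
 */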
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
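/*
 * Usage sketch (hedged, not part of the original file): __free_iova() is
 * typically handed an iova previously returned by alloc_iova() or looked
 * up with find_iova(), once the corresponding mappings are torn down.
 *
 *	iova = find_iova(iovad, dma_pfn);
 *	if (iova)
 *		__free_iova(iovad, iova);
 */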
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
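/*
 * Usage sketch (hedged, not part of the original file): free_iova() is the
 * convenience form of the find_iova()/__free_iova() pair shown above, e.g.
 *
 *	free_iova(iovad, dma_addr >> PAGE_SHIFT);
 */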
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
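/*
 * Usage sketch (hedged, not part of the original file): a domain teardown
 * path would release all outstanding ranges before freeing the containing
 * structure.  "domain" is a placeholder name.
 *
 *	put_iova_domain(&domain->iovad);
 *	kfree(domain);
 */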
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
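/*
 * Usage sketch (hedged, not part of the original file): platform setup code
 * might punch a hole in the allocatable space so that addresses used by a
 * fixed hardware window are never handed out by alloc_iova().  The range
 * below is purely illustrative.
 *
 *	// keep 0xfee00000 - 0xfeefffff (e.g. an interrupt window) reserved
 *	reserve_iova(iovad, 0xfee00000UL >> PAGE_SHIFT,
 *		     0xfeefffffUL >> PAGE_SHIFT);
 */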
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
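/*
 * Usage sketch (hedged, not part of the original file): when a device is
 * moved to a freshly initialised domain, previously reserved ranges can be
 * replicated into it so the new domain honours the same holes.  The names
 * "reserved_ranges" and "new_domain" are placeholders.
 *
 *	init_iova_domain(&new_domain->iovad);
 *	copy_reserved_iova(&reserved_ranges, &new_domain->iovad);
 */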