drivers/iommu/iova.c
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#include <linux/iova.h>
#include <linux/slab.h>

static struct kmem_cache *iommu_iova_cache;
int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					 sizeof(struct iova),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		pr_err("Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}
void iommu_iova_cache_destroy(void)
{
	kmem_cache_destroy(iommu_iova_cache);
}
struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}
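
/*
 * Illustrative sketch, not part of this file: a consumer of this allocator
 * would call iommu_iova_cache_init() once during its own initialisation and
 * pair it with iommu_iova_cache_destroy() on teardown.  The function name
 * example_iommu_init() below is made up for the example.
 *
 *	static int __init example_iommu_init(void)
 *	{
 *		int ret = iommu_iova_cache_init();
 *
 *		if (ret)
 *			return ret;
 *
 *		... remaining driver setup; call iommu_iova_cache_destroy()
 *		    on any later failure path ...
 *
 *		return 0;
 *	}
 */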
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}
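
/*
 * Illustrative sketch, not part of this file: how a caller might set up a
 * domain.  The particular granule, start pfn and 32-bit boundary below are
 * assumptions chosen for the example, not values required by this API.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, PAGE_SIZE, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
 *
 * Here the granule equals the CPU page size (satisfying the BUG_ON above),
 * allocation starts at pfn 1 so that iova 0 is never handed out, and the
 * last argument marks the highest pfn reachable with a 32-bit DMA address.
 */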
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
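
/*
 * Worked example (illustrative): for size = 8 pfns (order 3) and
 * limit_pfn = 0x1005, pad_size = (0x1005 + 1) % 8 = 6.  The caller in
 * __alloc_and_insert_iova_range() then places the range at
 * pfn_lo = 0x1005 - (8 + 6) + 1 = 0xff8, which is aligned to 8 pfns.
 */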
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
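
/*
 * Illustrative sketch, not part of this file: a typical allocation on the
 * DMA map path.  "iovad" and "npages" are assumed to come from the caller.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, npages, iovad.dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *
 * The caller would then map iova->pfn_lo .. iova->pfn_hi in its IOMMU page
 * tables and later release the range with __free_iova(&iovad, iova).
 * Passing size_aligned = true makes pfn_lo a multiple of
 * roundup_pow_of_two(npages), which is convenient when the mapping may use
 * IOMMU superpages.
 */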
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
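
/*
 * Illustrative sketch, not part of this file: an unmap path that only has a
 * DMA address to go on.  "dma_addr" and "iovad" are assumed caller state.
 *
 *	unsigned long pfn = dma_addr >> PAGE_SHIFT;
 *	struct iova *iova = find_iova(&iovad, pfn);
 *
 *	if (iova) {
 *		unsigned long npages = iova->pfn_hi - iova->pfn_lo + 1;
 *
 *		... tear down the IOMMU mapping covering npages pfns ...
 *		__free_iova(&iovad, iova);
 *	}
 *
 * free_iova() above is simply this find_iova() + __free_iova() combination
 * for callers that do not need to inspect the node in between.
 */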
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
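
/*
 * Illustrative sketch, not part of this file: carving out an address hole so
 * that alloc_iova() never hands it to a device.  The x86 IOAPIC/MSI window
 * at 0xfee00000-0xfeefffff is used purely as an example region.
 *
 *	reserve_iova(&iovad, 0xfee00000UL >> PAGE_SHIFT,
 *		     0xfeefffffUL >> PAGE_SHIFT);
 *
 * Reservations that overlap existing nodes are merged into them via
 * __adjust_overlap_range() rather than inserted twice.
 */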
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
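
/*
 * Worked example (illustrative): given a node covering pfns 0x100-0x1ff,
 * split_and_remove_iova(iovad, iova, 0x140, 0x17f) inserts two new nodes,
 * 0x100-0x13f and 0x180-0x1ff, back into the tree and returns the original
 * node trimmed to 0x140-0x17f, now removed from the tree and owned by the
 * caller.
 */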