/* drivers/iommu/iova.c */
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>

static bool iova_rcache_insert(struct iova_domain *iovad,
                               unsigned long pfn,
                               unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn, unsigned long pfn_32bit)
{
        /*
         * IOVA granularity will normally be equal to the smallest
         * supported IOMMU page size; both *must* be capable of
         * representing individual CPU pages exactly.
         */
        BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->granule = granule;
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = pfn_32bit;
        init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

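/*
 * Minimal usage sketch (illustrative, not part of the original file): an
 * IOMMU driver typically initializes a domain with a granule equal to its
 * smallest supported page size and a start pfn above any reserved low
 * range, e.g.:
 *
 *      struct iova_domain iovad;
 *
 *      init_iova_domain(&iovad, SZ_4K, IOVA_START_PFN, DMA_32BIT_PFN);
 *
 * IOVA_START_PFN and DMA_32BIT_PFN stand in for driver-defined constants
 * (hypothetical names in this sketch).
 */
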
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn > iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        rb_entry(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = rb_entry(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo) {
                struct rb_node *node = rb_next(&free->node);
                struct iova *iova = rb_entry(node, struct iova, node);

                /* only cache if it's below 32bit pfn */
                if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
                        iovad->cached32_node = node;
                else
                        iovad->cached32_node = NULL;
        }
}

/*
 * Computes the padding size required, to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
        return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}

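/*
 * Worked example of the padding arithmetic (illustrative values, not from
 * the original file): for size = 6 and limit_pfn = 100 the allocation is
 * aligned to __roundup_pow_of_two(6) = 8, so
 *
 *      pad_size = (100 + 1 - 6) & (8 - 1) = 95 & 7 = 7
 *
 * and __alloc_and_insert_iova_range() below then places the range at
 * pfn_lo = 100 - (6 + 7) + 1 = 88, which sits on an 8-pfn boundary.
 */
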
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                        struct iova *new, bool size_aligned)
{
        struct rb_node *prev, *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
                struct iova *curr_iova = rb_entry(curr, struct iova, node);

                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                prev = curr;
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* Insert the new_iova into domain rbtree by holding writer lock */
        /* Add new node and rebalance tree. */
        {
                struct rb_node **entry, *parent = NULL;

                /* If we have 'prev', it's a valid place to start the
                   insertion. Otherwise, start from the root. */
                if (prev)
                        entry = &prev;
                else
                        entry = &iovad->rbroot.rb_node;

                /* Figure out where to put new node */
                while (*entry) {
                        struct iova *this = rb_entry(*entry, struct iova, node);
                        parent = *entry;

                        if (new->pfn_lo < this->pfn_lo)
                                entry = &((*entry)->rb_left);
                        else if (new->pfn_lo > this->pfn_lo)
                                entry = &((*entry)->rb_right);
                        else
                                BUG(); /* this should not happen */
                }

                /* Add new node and rebalance tree. */
                rb_link_node(&new->node, parent, entry);
                rb_insert_color(&new->node, &iovad->rbroot);
        }
        __cached_rbnode_insert_update(iovad, saved_pfn, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = rb_entry(*new, struct iova, node);

                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
        return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
        kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
        mutex_lock(&iova_cache_mutex);
        if (!iova_cache_users) {
                iova_cache = kmem_cache_create(
                        "iommu_iova", sizeof(struct iova), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!iova_cache) {
                        mutex_unlock(&iova_cache_mutex);
                        printk(KERN_ERR "Couldn't create iova cache\n");
                        return -ENOMEM;
                }
        }

        iova_cache_users++;
        mutex_unlock(&iova_cache_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
        mutex_lock(&iova_cache_mutex);
        if (WARN_ON(!iova_cache_users)) {
                mutex_unlock(&iova_cache_mutex);
                return;
        }
        iova_cache_users--;
        if (!iova_cache_users)
                kmem_cache_destroy(iova_cache);
        mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

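/*
 * Usage sketch (illustrative): users of the iova library take a reference
 * on the shared kmem cache before allocating iova structures and drop it
 * again on teardown, e.g.:
 *
 *      int ret = iova_cache_get();
 *
 *      if (ret)
 *              return ret;
 *      ...allocate and free iovas...
 *      iova_cache_put();
 */
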
/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

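/*
 * Usage sketch (illustrative, with made-up sizes): allocate a size-aligned
 * range of 8 pfns below the domain's 32-bit limit and release it again:
 *
 *      struct iova *iova;
 *
 *      iova = alloc_iova(&iovad, 8, iovad.dma_32bit_pfn, true);
 *      if (!iova)
 *              return -ENOMEM;
 *      ...map iova->pfn_lo .. iova->pfn_hi...
 *      __free_iova(&iovad, iova);
 */
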
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct rb_node *node = iovad->rbroot.rb_node;

        assert_spin_locked(&iovad->iova_rbtree_lock);

        while (node) {
                struct iova *iova = rb_entry(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
        assert_spin_locked(&iovad->iova_rbtree_lock);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct iova *iova;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        iova = private_find_iova(iovad, pfn);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        private_free_iova(iovad, iova);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
                unsigned long limit_pfn)
{
        bool flushed_rcache = false;
        unsigned long iova_pfn;
        struct iova *new_iova;

        iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
        if (iova_pfn)
                return iova_pfn;

retry:
        new_iova = alloc_iova(iovad, size, limit_pfn, true);
        if (!new_iova) {
                unsigned int cpu;

                if (flushed_rcache)
                        return 0;

                /* Try replenishing IOVAs by flushing rcache. */
                flushed_rcache = true;
                preempt_disable();
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
                preempt_enable();
                goto retry;
        }

        return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
        if (iova_rcache_insert(iovad, pfn, size))
                return;

        free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

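/*
 * Usage sketch (illustrative): the _fast variants deal in raw pfns and are
 * intended to be used as a pair, so that freed ranges cycle back through
 * the per-CPU rcache:
 *
 *      unsigned long pfn;
 *
 *      pfn = alloc_iova_fast(&iovad, nrpages, limit_pfn);
 *      if (!pfn)
 *              ...handle allocation failure...
 *      ...
 *      free_iova_fast(&iovad, pfn, nrpages);
 */
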
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        free_iova_rcaches(iovad);
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = rb_entry(node, struct iova, node);

                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = rb_entry(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
        if (iova)
                iova_insert_rbtree(&iovad->rbroot, iova);

        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi:- higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = rb_entry(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;

                } else if (overlap)
                        break;
        }

        /* We are here either because this is the first reserved node
         * or we need to insert the remaining non-overlapping addr range
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

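/*
 * Usage sketch (illustrative): carve out a window (e.g. an MSI range) so
 * that alloc_iova() never hands it out:
 *
 *      if (!reserve_iova(&iovad, lo_pfn, hi_pfn))
 *              pr_err("Failed to reserve IOVA range\n");
 *
 * lo_pfn/hi_pfn are the physical window converted to iova pfns by the
 * caller (hypothetical names in this sketch).
 */
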
/**
 * copy_reserved_iova - copies the reserved iova's between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = rb_entry(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
                                iova->pfn_lo, iova->pfn_lo);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
                unsigned long pfn_lo, unsigned long pfn_hi)
{
        unsigned long flags;
        struct iova *prev = NULL, *next = NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        if (iova->pfn_lo < pfn_lo) {
                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
                if (prev == NULL)
                        goto error;
        }
        if (iova->pfn_hi > pfn_hi) {
                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
                if (next == NULL)
                        goto error;
        }

        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);

        if (prev) {
                iova_insert_rbtree(&iovad->rbroot, prev);
                iova->pfn_lo = pfn_lo;
        }
        if (next) {
                iova_insert_rbtree(&iovad->rbroot, next);
                iova->pfn_hi = pfn_hi;
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return iova;

error:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        if (prev)
                free_iova_mem(prev);
        return NULL;
}

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
        unsigned long size;
        unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
        spinlock_t lock;
        struct iova_magazine *loaded;
        struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
        return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
        kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
        unsigned long flags;
        int i;

        if (!mag)
                return;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

        for (i = 0 ; i < mag->size; ++i) {
                struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

                BUG_ON(!iova);
                private_free_iova(iovad, iova);
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
        return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
        return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
                                       unsigned long limit_pfn)
{
        BUG_ON(iova_magazine_empty(mag));

        if (mag->pfns[mag->size - 1] >= limit_pfn)
                return 0;

        return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
        BUG_ON(iova_magazine_full(mag));

        mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned int cpu;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                spin_lock_init(&rcache->lock);
                rcache->depot_size = 0;
                rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
                if (WARN_ON(!rcache->cpu_rcaches))
                        continue;
                for_each_possible_cpu(cpu) {
                        cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                        spin_lock_init(&cpu_rcache->lock);
                        cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
                        cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
                }
        }
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
                                 struct iova_rcache *rcache,
                                 unsigned long iova_pfn)
{
        struct iova_magazine *mag_to_free = NULL;
        struct iova_cpu_rcache *cpu_rcache;
        bool can_insert = false;
        unsigned long flags;

        cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_full(cpu_rcache->loaded)) {
                can_insert = true;
        } else if (!iova_magazine_full(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                can_insert = true;
        } else {
                struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

                if (new_mag) {
                        spin_lock(&rcache->lock);
                        if (rcache->depot_size < MAX_GLOBAL_MAGS) {
                                rcache->depot[rcache->depot_size++] =
                                                cpu_rcache->loaded;
                        } else {
                                mag_to_free = cpu_rcache->loaded;
                        }
                        spin_unlock(&rcache->lock);

                        cpu_rcache->loaded = new_mag;
                        can_insert = true;
                }
        }

        if (can_insert)
                iova_magazine_push(cpu_rcache->loaded, iova_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
        put_cpu_ptr(rcache->cpu_rcaches);

        if (mag_to_free) {
                iova_magazine_free_pfns(mag_to_free, iovad);
                iova_magazine_free(mag_to_free);
        }

        return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
                               unsigned long size)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return false;

        return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                                       unsigned long limit_pfn)
{
        struct iova_cpu_rcache *cpu_rcache;
        unsigned long iova_pfn = 0;
        bool has_pfn = false;
        unsigned long flags;

        cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);

        if (!iova_magazine_empty(cpu_rcache->loaded)) {
                has_pfn = true;
        } else if (!iova_magazine_empty(cpu_rcache->prev)) {
                swap(cpu_rcache->prev, cpu_rcache->loaded);
                has_pfn = true;
        } else {
                spin_lock(&rcache->lock);
                if (rcache->depot_size > 0) {
                        iova_magazine_free(cpu_rcache->loaded);
                        cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
                        has_pfn = true;
                }
                spin_unlock(&rcache->lock);
        }

        if (has_pfn)
                iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
        put_cpu_ptr(rcache->cpu_rcaches);

        return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                     unsigned long size,
                                     unsigned long limit_pfn)
{
        unsigned int log_size = order_base_2(size);

        if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
                return 0;

        return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}

/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
                                 struct iova_rcache *rcache)
{
        struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
        unsigned long flags;

        spin_lock_irqsave(&cpu_rcache->lock, flags);

        iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
        iova_magazine_free(cpu_rcache->loaded);

        iova_magazine_free_pfns(cpu_rcache->prev, iovad);
        iova_magazine_free(cpu_rcache->prev);

        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
        struct iova_rcache *rcache;
        unsigned long flags;
        unsigned int cpu;
        int i, j;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                for_each_possible_cpu(cpu)
                        free_cpu_iova_rcache(cpu, iovad, rcache);
                spin_lock_irqsave(&rcache->lock, flags);
                free_percpu(rcache->cpu_rcaches);
                for (j = 0; j < rcache->depot_size; ++j) {
                        iova_magazine_free_pfns(rcache->depot[j], iovad);
                        iova_magazine_free(rcache->depot[j]);
                }
                spin_unlock_irqrestore(&rcache->lock, flags);
        }
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
        struct iova_cpu_rcache *cpu_rcache;
        struct iova_rcache *rcache;
        unsigned long flags;
        int i;

        for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
                spin_lock_irqsave(&cpu_rcache->lock, flags);
                iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
                iova_magazine_free_pfns(cpu_rcache->prev, iovad);
                spin_unlock_irqrestore(&cpu_rcache->lock, flags);
        }
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");