/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
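
/*
 * Illustrative usage sketch (not part of the original file): an IOMMU
 * driver would typically embed an iova_domain in its own per-domain
 * structure and initialize it once, capping cached 32-bit allocations at
 * the 4GB boundary. IOVA_PFN() and DMA_32BIT_MASK are assumed caller-side
 * helpers, not defined here:
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, IOVA_PFN(DMA_32BIT_MASK));
 */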

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
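
/*
 * Worked example (illustrative): for size = 8 pages (order = 3) and
 * limit_pfn = 0x1005, pad_size = 0x1006 % 8 = 6. The caller below then
 * places the allocation at pfn_lo = 0x1005 - (8 + 6) + 1 = 0xff8, which
 * is aligned on an 8-page boundary.
 */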

static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);
		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if size_aligned address range is required
 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN,
 * looking down from limit_pfn instead of up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
			size_aligned);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}
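
/*
 * Usage sketch (illustrative, not part of the original file): a DMA-mapping
 * path might carve out a naturally aligned range below the 32-bit limit and
 * release it when the mapping is torn down. nrpages is an assumed caller
 * variable holding the number of pages to map:
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&iovad, nrpages, iovad.dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... program the IOMMU page tables for iova->pfn_lo..iova->pfn_hi ...
 *	__free_iova(&iovad, iova);
 */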

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
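
/*
 * Illustrative unmap-path sketch: the code that created a mapping usually
 * looks the range up by any pfn inside it and then frees it. dma_pfn is an
 * assumed variable holding a previously allocated pfn:
 *
 *	struct iova *iova = find_iova(&iovad, dma_pfn);
 *
 *	if (iova)
 *		__free_iova(&iovad, iova);
 *
 * which is exactly the pattern free_iova() below wraps up.
 */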

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
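
/*
 * Illustrative sketch: reserve_iova() is typically used to blank out ranges
 * that must never be handed to a device, for example an interrupt or MMIO
 * window. The range and the reserved_iova_list domain below are assumptions
 * for illustration only:
 *
 *	if (!reserve_iova(&reserved_iova_list, IOVA_PFN(0xfee00000),
 *			  IOVA_PFN(0xfeefffff)))
 *		printk(KERN_ERR "Failed to reserve IOAPIC range\n");
 */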

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
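
/*
 * Illustrative sketch: when a new translation domain is set up, a driver can
 * carry the globally reserved ranges over so per-domain allocations also
 * avoid them. reserved_iova_list and domain->iovad are assumed caller-owned
 * iova_domain instances:
 *
 *	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
 */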