drivers/net/ethernet/mellanox/mlx4/alloc.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"
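
/*
 * Round-robin bitmap allocator used throughout the driver for resource
 * numbering.  Free bits are handed out starting from bitmap->last, so a
 * freed number is reused as late as possible.  Whenever the search wraps
 * around, bitmap->top is advanced and OR'ed into the returned value; with
 * a non-zero ->mask this appears intended to spread successive uses of the
 * same slot across a wider number space.  (This paragraph is a reading of
 * the code below, not an authoritative statement of hardware requirements.)
 */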
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
        u32 obj;

        spin_lock(&bitmap->lock);

        obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (obj < bitmap->max) {
                set_bit(obj, bitmap->table);
                bitmap->last = (obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                obj |= bitmap->top;
        } else
                obj = -1;

        if (obj != -1)
                --bitmap->avail;

        spin_unlock(&bitmap->lock);

        return obj;
}
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
        mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
                                        u32 start, u32 nbits,
                                        int len, int align, u32 skip_mask)
{
        unsigned long end, i;

again:
        start = ALIGN(start, align);

        while ((start < nbits) && (test_bit(start, bitmap) ||
                                   (start & skip_mask)))
                start += align;

        if (start >= nbits)
                return -1;

        end = start + len;
        if (end > nbits)
                return -1;

        for (i = start + 1; i < end; i++) {
                if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
                        start = i + 1;
                        goto again;
                }
        }

        return start;
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
                            int align, u32 skip_mask)
{
        u32 obj;

        if (likely(cnt == 1 && align == 1 && !skip_mask))
                return mlx4_bitmap_alloc(bitmap);

        spin_lock(&bitmap->lock);

        obj = find_aligned_range(bitmap->table, bitmap->last,
                                 bitmap->max, cnt, align, skip_mask);
        if (obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
                obj = find_aligned_range(bitmap->table, 0, bitmap->max,
                                         cnt, align, skip_mask);
        }

        if (obj < bitmap->max) {
                bitmap_set(bitmap->table, obj, cnt);
                if (obj == bitmap->last) {
                        bitmap->last = (obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                obj |= bitmap->top;
        } else
                obj = -1;

        if (obj != -1)
                bitmap->avail -= cnt;

        spin_unlock(&bitmap->lock);

        return obj;
}
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
        return bitmap->avail;
}

static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
{
        return obj & (bitmap->max + bitmap->reserved_top - 1);
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
                            int use_rr)
{
        obj &= bitmap->max + bitmap->reserved_top - 1;

        spin_lock(&bitmap->lock);
        if (!use_rr) {
                bitmap->last = min(bitmap->last, obj);
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                                & bitmap->mask;
        }
        bitmap_clear(bitmap->table, obj, cnt);
        bitmap->avail += cnt;
        spin_unlock(&bitmap->lock);
}
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
                     u32 reserved_bot, u32 reserved_top)
{
        /* num must be a power of 2 */
        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top  = 0;
        bitmap->max  = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        bitmap->avail = num - reserved_top - reserved_bot;
        bitmap->effective_len = bitmap->avail;
        spin_lock_init(&bitmap->lock);
        bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        bitmap_set(bitmap->table, 0, reserved_bot);

        return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
        bitmap_free(bitmap->table);
}
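
/*
 * Illustrative only: a minimal sketch (not taken from this file) of how the
 * bitmap API above is used by callers elsewhere in the driver.  The size and
 * reserved counts are made-up example values:
 *
 *      struct mlx4_bitmap map;
 *      u32 obj;
 *
 *      if (mlx4_bitmap_init(&map, 256, 256 - 1, 2, 0))
 *              return -ENOMEM;                 - 256 entries, 2 reserved at the bottom
 *      obj = mlx4_bitmap_alloc(&map);          - returns (u32)-1 when the bitmap is full
 *      if (obj != (u32)-1)
 *              mlx4_bitmap_free(&map, obj, 0); - use_rr == 0: plain, non-round-robin free
 *      mlx4_bitmap_cleanup(&map);
 *
 * The zone allocator below layers priorities and fallback policies on top of
 * a set of such bitmaps: each zone wraps one bitmap at a given offset, and
 * allocations may spill over into other zones depending on the zone's
 * MLX4_ZONE_* flags.
 */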
struct mlx4_zone_allocator {
        struct list_head                entries;
        struct list_head                prios;
        u32                             last_uid;
        u32                             mask;
        /* protect the zone_allocator from concurrent accesses */
        spinlock_t                      lock;
        enum mlx4_zone_alloc_flags      flags;
};

struct mlx4_zone_entry {
        struct list_head                list;
        struct list_head                prio_list;
        u32                             uid;
        struct mlx4_zone_allocator      *allocator;
        struct mlx4_bitmap              *bitmap;
        int                             use_rr;
        int                             priority;
        int                             offset;
        enum mlx4_zone_flags            flags;
};
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
{
        struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);

        if (NULL == zones)
                return NULL;

        INIT_LIST_HEAD(&zones->entries);
        INIT_LIST_HEAD(&zones->prios);
        spin_lock_init(&zones->lock);
        zones->last_uid = 0;
        zones->mask = 0;
        zones->flags = flags;

        return zones;
}
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
                      struct mlx4_bitmap *bitmap,
                      u32 flags,
                      int priority,
                      int offset,
                      u32 *puid)
{
        u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
        struct mlx4_zone_entry *it;
        struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);

        if (NULL == zone)
                return -ENOMEM;

        zone->flags = flags;
        zone->bitmap = bitmap;
        zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
        zone->priority = priority;
        zone->offset = offset;

        spin_lock(&zone_alloc->lock);

        zone->uid = zone_alloc->last_uid++;
        zone->allocator = zone_alloc;

        if (zone_alloc->mask < mask)
                zone_alloc->mask = mask;

        /* ->prios is kept sorted by ascending priority and carries one
         * representative zone per distinct priority; ->entries links all
         * zones, each inserted next to its priority's representative.
         */
        list_for_each_entry(it, &zone_alloc->prios, prio_list)
                if (it->priority >= priority)
                        break;

        if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
                list_add_tail(&zone->prio_list, &it->prio_list);
        list_add_tail(&zone->list, &it->list);

        spin_unlock(&zone_alloc->lock);

        *puid = zone->uid;

        return 0;
}
/* Should be called under a lock */
static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
        struct mlx4_zone_allocator *zone_alloc = entry->allocator;

        if (!list_empty(&entry->prio_list)) {
                /* Check if we need to add an alternative node to the prio list */
                if (!list_is_last(&entry->list, &zone_alloc->entries)) {
                        struct mlx4_zone_entry *next = list_first_entry(&entry->list,
                                                                        typeof(*next),
                                                                        list);

                        if (next->priority == entry->priority)
                                list_add_tail(&next->prio_list, &entry->prio_list);
                }

                list_del(&entry->prio_list);
        }

        list_del(&entry->list);

        if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
                u32 mask = 0;
                struct mlx4_zone_entry *it;

                list_for_each_entry(it, &zone_alloc->prios, prio_list) {
                        u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);

                        if (mask < cur_mask)
                                mask = cur_mask;
                }
                zone_alloc->mask = mask;
        }
}
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
{
        struct mlx4_zone_entry *zone, *tmp;

        spin_lock(&zone_alloc->lock);

        list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
                list_del(&zone->list);
                list_del(&zone->prio_list);
                kfree(zone);
        }

        spin_unlock(&zone_alloc->lock);
        kfree(zone_alloc);
}
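
/*
 * Allocation policy implemented by __mlx4_alloc_from_zone() below: the
 * requested zone's own bitmap is tried first; on failure, zones of lower
 * priority are tried if the zone has MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO,
 * then other zones of equal priority if it has
 * MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO, and finally zones of higher priority
 * if it has MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO.
 */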
/* Should be called under a lock */
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
                                  int align, u32 skip_mask, u32 *puid)
{
        u32 uid = 0;
        u32 res;
        struct mlx4_zone_allocator *zone_alloc = zone->allocator;
        struct mlx4_zone_entry *curr_node;

        res = mlx4_bitmap_alloc_range(zone->bitmap, count,
                                      align, skip_mask);

        if (res != (u32)-1) {
                res += zone->offset;
                uid = zone->uid;
                goto out;
        }

        list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
                if (unlikely(curr_node->priority == zone->priority))
                        break;
        }

        if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
                struct mlx4_zone_entry *it = curr_node;

                list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
                        res = mlx4_bitmap_alloc_range(it->bitmap, count,
                                                      align, skip_mask);
                        if (res != (u32)-1) {
                                res += it->offset;
                                uid = it->uid;
                                goto out;
                        }
                }
        }

        if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
                struct mlx4_zone_entry *it = curr_node;

                list_for_each_entry_from(it, &zone_alloc->entries, list) {
                        if (unlikely(it == zone))
                                continue;

                        if (unlikely(it->priority != curr_node->priority))
                                break;

                        res = mlx4_bitmap_alloc_range(it->bitmap, count,
                                                      align, skip_mask);
                        if (res != (u32)-1) {
                                res += it->offset;
                                uid = it->uid;
                                goto out;
                        }
                }
        }

        if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
                if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
                        goto out;

                curr_node = list_first_entry(&curr_node->prio_list,
                                             typeof(*curr_node),
                                             prio_list);

                list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
                        res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
                                                      align, skip_mask);
                        if (res != (u32)-1) {
                                res += curr_node->offset;
                                uid = curr_node->uid;
                                goto out;
                        }
                }
        }

out:
        if (NULL != puid && res != (u32)-1)
                *puid = uid;

        return res;
}
/* Should be called under a lock */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
                                  u32 count)
{
        mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
}

/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
                struct mlx4_zone_allocator *zones, u32 uid)
{
        struct mlx4_zone_entry *zone;

        list_for_each_entry(zone, &zones->entries, list) {
                if (zone->uid == uid)
                        return zone;
        }

        return NULL;
}
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
{
        struct mlx4_zone_entry *zone;
        struct mlx4_bitmap *bitmap;

        spin_lock(&zones->lock);

        zone = __mlx4_find_zone_by_uid(zones, uid);

        bitmap = zone == NULL ? NULL : zone->bitmap;

        spin_unlock(&zones->lock);

        return bitmap;
}
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
        struct mlx4_zone_entry *zone;
        int res = 0;

        spin_lock(&zones->lock);

        zone = __mlx4_find_zone_by_uid(zones, uid);

        if (NULL == zone) {
                res = -1;
                goto out;
        }

        __mlx4_zone_remove_one_entry(zone);

out:
        spin_unlock(&zones->lock);
        kfree(zone);

        return res;
}
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
                struct mlx4_zone_allocator *zones, u32 obj)
{
        struct mlx4_zone_entry *zone, *zone_candidate = NULL;
        u32 dist = (u32)-1;

        /* Search for the smallest zone that this obj could be
         * allocated from. This is done in order to handle
         * situations when small bitmaps are allocated from bigger
         * bitmaps (and the allocated space is marked as reserved in
         * the bigger bitmap).
         */
        list_for_each_entry(zone, &zones->entries, list) {
                if (obj >= zone->offset) {
                        u32 mobj = (obj - zone->offset) & zones->mask;

                        if (mobj < zone->bitmap->max) {
                                u32 curr_dist = zone->bitmap->effective_len;

                                if (curr_dist < dist) {
                                        dist = curr_dist;
                                        zone_candidate = zone;
                                }
                        }
                }
        }

        return zone_candidate;
}
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
                            int align, u32 skip_mask, u32 *puid)
{
        struct mlx4_zone_entry *zone;
        int res = -1;

        spin_lock(&zones->lock);

        zone = __mlx4_find_zone_by_uid(zones, uid);

        if (NULL == zone)
                goto out;

        res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);

out:
        spin_unlock(&zones->lock);

        return res;
}
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
{
        struct mlx4_zone_entry *zone;
        int res = 0;

        spin_lock(&zones->lock);

        zone = __mlx4_find_zone_by_uid(zones, uid);

        if (NULL == zone) {
                res = -1;
                goto out;
        }

        __mlx4_free_from_zone(zone, obj, count);

out:
        spin_unlock(&zones->lock);

        return res;
}
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
        struct mlx4_zone_entry *zone;
        int res;

        if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
                return -EFAULT;

        spin_lock(&zones->lock);

        zone = __mlx4_find_zone_by_uid_unique(zones, obj);

        if (NULL == zone) {
                res = -1;
                goto out;
        }

        __mlx4_free_from_zone(zone, obj, count);
        res = 0;

out:
        spin_unlock(&zones->lock);

        return res;
}
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
                                 struct mlx4_buf *buf)
{
        dma_addr_t t;

        buf->nbufs      = 1;
        buf->npages     = 1;
        buf->page_shift = get_order(size) + PAGE_SHIFT;
        buf->direct.buf =
                dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
                                   GFP_KERNEL);
        if (!buf->direct.buf)
                return -ENOMEM;

        buf->direct.map = t;

        /* Shrink the reported page size until it matches the alignment of
         * the DMA address we actually got back.
         */
        while (t & ((1 << buf->page_shift) - 1)) {
                --buf->page_shift;
                buf->npages *= 2;
        }

        return 0;
}
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                   struct mlx4_buf *buf)
{
        if (size <= max_direct) {
                return mlx4_buf_direct_alloc(dev, size, buf);
        } else {
                dma_addr_t t;
                int i;

                buf->direct.buf = NULL;
                buf->nbufs      = DIV_ROUND_UP(size, PAGE_SIZE);
                buf->npages     = buf->nbufs;
                buf->page_shift = PAGE_SHIFT;
                buf->page_list  = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                          GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf =
                                dma_alloc_coherent(&dev->persist->pdev->dev,
                                                   PAGE_SIZE, &t, GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                }
        }

        return 0;

err_free:
        mlx4_buf_free(dev, size, buf);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
        if (buf->nbufs == 1) {
                dma_free_coherent(&dev->persist->pdev->dev, size,
                                  buf->direct.buf, buf->direct.map);
        } else {
                int i;

                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&dev->persist->pdev->dev,
                                                  PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
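
/*
 * Doorbell record allocation.  Each mlx4_db_pgdir owns one DMA-coherent page
 * of doorbell records plus two bitmaps: order0 tracks free single records and
 * order1 tracks free aligned pairs, in a small buddy-style scheme where an
 * order-1 bit covers two consecutive order-0 slots.  New pages are added
 * lazily by mlx4_db_alloc() when no existing pgdir can satisfy a request.
 * (This is a summary of the code below, stated here for readability.)
 */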
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
        struct mlx4_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;

        bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
        pgdir->bits[0] = pgdir->order0;
        pgdir->bits[1] = pgdir->order1;
        pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
                                            &pgdir->db_dma, GFP_KERNEL);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
                                    struct mlx4_db *db, int order)
{
        int o;
        int i;

        for (o = order; o <= 1; ++o) {
                i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
                if (i < MLX4_DB_PER_PAGE >> o)
                        goto found;
        }

        return -ENOMEM;

found:
        clear_bit(i, pgdir->bits[o]);

        i <<= o;

        if (o > order)
                set_bit(i ^ 1, pgdir->bits[order]);

        db->u.pgdir = pgdir;
        db->index   = i;
        db->db      = pgdir->db_page + db->index;
        db->dma     = pgdir->db_dma  + db->index * 4;
        db->order   = order;

        return 0;
}
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&priv->pgdir_mutex);

        list_for_each_entry(pgdir, &priv->pgdir_list, list)
                if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
                        goto out;

        pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &priv->pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
        mutex_unlock(&priv->pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int o;
        int i;

        mutex_lock(&priv->pgdir_mutex);

        o = db->order;
        i = db->index;

        if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
                clear_bit(i ^ 1, db->u.pgdir->order0);
                ++o;
        }
        i >>= o;
        set_bit(i, db->u.pgdir->bits[o]);

        if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
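
/*
 * mlx4_alloc_hwq_res() bundles the pieces a hardware work queue needs: a
 * doorbell record, the queue buffer itself and an MTT describing that buffer,
 * written out with mlx4_buf_write_mtt().  The error path unwinds in the
 * reverse order of the setup.
 */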
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size)
{
        int err;

        err = mlx4_db_alloc(dev, &wqres->db, 1);
        if (err)
                return err;

        *wqres->db.db = 0;

        err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
        if (err)
                goto err_db;

        err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
                            &wqres->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
        mlx4_buf_free(dev, size, &wqres->buf);
err_db:
        mlx4_db_free(dev, &wqres->db);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
                       int size)
{
        mlx4_mtt_cleanup(dev, &wqres->mtt);
        mlx4_buf_free(dev, size, &wqres->buf);
        mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);