/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
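/*
 * All MPT/MTT fields are stored in big-endian byte order, as the HCA
 * expects; note the cpu_to_be32()/cpu_to_be64() conversions throughout.
 */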
/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
	u32 flags;
	u32 page_size;
	u32 key;
	u32 pd;
	u64 start;
	u64 length;
	u32 lkey;
	u32 window_count;
	u32 window_count_limit;
	u64 mtt_seg;
	u32 mtt_sz;		/* Arbel only */
	u32 reserved[2];
} __attribute__((packed));
#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00
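/*
 * The first byte of an MPT entry encodes ownership: software sets it to
 * MTHCA_MPT_STATUS_SW before touching the entry and back to
 * MTHCA_MPT_STATUS_HW to hand it to the HCA (see the FMR map/unmap
 * paths below).
 */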
/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		m = 1 << (buddy->max_order - o);
		seg = find_first_bit(buddy->bits[o], m);
		if (seg < m)
			goto found;
	}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}
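/*
 * Freeing walks back up the orders: as long as the buddy block
 * (seg ^ 1) is also free, the two halves are merged and the combined
 * block is marked free at the next higher order.
 */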
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);

	spin_unlock(&buddy->lock);
}
static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	if (!buddy->bits)
		goto err_out;

	memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i],
			    1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);

err_out:
	return -ENOMEM;
}
static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
}
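/*
 * On memfree HCAs the MTT table lives in ICM, so a buddy allocation
 * must also pin the ICM pages backing the segment range via
 * mthca_table_get_range(); the range is unpinned again in
 * mthca_free_mtt().
 */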
static u32 mthca_alloc_mtt(struct mthca_dev *dev, int order,
			   struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}
static void mthca_free_mtt(struct mthca_dev *dev, u32 seg, int order,
			   struct mthca_buddy *buddy)
{
	mthca_buddy_free(buddy, seg, order);

	if (mthca_is_memfree(dev))
		mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
				      seg + (1 << order) - 1);
}
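/*
 * Convert between hardware MPT indices and the keys handed out to
 * consumers.  Tavor uses the index as the key directly, while Arbel
 * rotates it by 8 bits.  Masking the hardware index with
 * (num_mpts - 1) always recovers the MPT slot, which lets the FMR
 * remap paths below advance a key by num_mpts on every remap (so
 * stale keys stop matching) without moving to a different MPT entry.
 */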
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
	if (mthca_is_memfree(dev))
		return arbel_hw_index_to_key(ind);
	else
		return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
	if (mthca_is_memfree(dev))
		return arbel_key_to_hw_index(key);
	else
		return tavor_key_to_hw_index(key);
}
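/*
 * Allocate a "no translation" MR: a physical region with address
 * translation bypassed (MTHCA_MPT_FLAG_PHYSICAL), covering the entire
 * address space (start 0, length ~0).
 */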
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	void *mailbox = NULL;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int err;
	u8 status;

	might_sleep();

	mr->order = -1;

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox) {
		err = -ENOMEM;
		goto err_out_table;
	}
	mpt_entry = MAILBOX_ALIGN(mailbox);

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_PHYSICAL    |
				       MTHCA_MPT_FLAG_REGION      |
				       access);
	mpt_entry->page_size = 0;
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = 0;
	mpt_entry->length    = ~0ULL;

	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

	err = mthca_SW2HW_MPT(dev, mpt_entry,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_table;
	} else if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_table;
	}

	kfree(mailbox);
	return err;

err_out_table:
	if (mthca_is_memfree(dev))
		mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	kfree(mailbox);
	return err;
}
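/*
 * WRITE_MTT mailbox layout: the first 64-bit word holds the target MTT
 * address (mtt_base plus the segment offset), the second word is left
 * zero, and the page entries follow from mtt_entry[2], each tagged
 * with MTHCA_MTT_FLAG_PRESENT.
 */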
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	void *mailbox;
	u64 *mtt_entry;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();
	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
	     i < list_len;
	     i <<= 1, ++mr->order)
		; /* nothing */

	mr->first_seg = mthca_alloc_mtt(dev, mr->order,
					&dev->mr_table.mtt_buddy);
	if (mr->first_seg == -1)
		goto err_out_table;

	/*
	 * If list_len is odd, we add one more dummy entry for
	 * firmware efficiency.
	 */
	mailbox = kmalloc(max(sizeof *mpt_entry,
			      (size_t) 8 * (list_len + (list_len & 1) + 2)) +
			  MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out_free_mtt;

	mtt_entry = MAILBOX_ALIGN(mailbox);

	mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
				   mr->first_seg * MTHCA_MTT_SEG_SIZE);
	mtt_entry[1] = 0;
	for (i = 0; i < list_len; ++i)
		mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
					       MTHCA_MTT_FLAG_PRESENT);
	if (list_len & 1) {
		mtt_entry[i + 2] = 0;
		++list_len;
	}

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry\n");
		for (i = 0; i < list_len + 2; ++i)
			printk(KERN_ERR "[%2d] %016llx\n",
			       i, (unsigned long long) be64_to_cpu(mtt_entry[i]));
	}

	err = mthca_WRITE_MTT(dev, mtt_entry, list_len, &status);
	if (err) {
		mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
		goto err_out_mailbox_free;
	}
	if (status) {
		mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	mpt_entry = MAILBOX_ALIGN(mailbox);

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = cpu_to_be64(iova);
	mpt_entry->length    = cpu_to_be64(total_size);
	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
	mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base +
					   mr->first_seg * MTHCA_MTT_SEG_SIZE);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mpt_entry,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox_free;
	} else if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	kfree(mailbox);
	return 0;

err_out_mailbox_free:
	kfree(mailbox);

err_out_free_mtt:
	mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);

err_out_table:
	if (mthca_is_memfree(dev))
		mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
static void mthca_free_region(struct mthca_dev *dev, u32 lkey, int order,
			      u32 first_seg, struct mthca_buddy *buddy)
{
	if (order >= 0)
		mthca_free_mtt(dev, first_seg, order, buddy);

	if (mthca_is_memfree(dev))
		mthca_table_put(dev, dev->mr_table.mpt_table,
				arbel_key_to_hw_index(lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;
	u8 status;

	might_sleep();

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1),
			      &status);
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
			   status);

	mthca_free_region(dev, mr->ibmr.lkey, mr->order, mr->first_seg,
			  &dev->mr_table.mtt_buddy);
}
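/*
 * FMRs (fast memory regions) can be remapped without firmware
 * commands: the MPT and MTT entries are written directly by the
 * driver, through the ioremapped MPT/MTT tables on Tavor or through
 * the ICM pages found with mthca_table_find() on memfree HCAs.
 */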
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
		    u32 access, struct mthca_fmr *mr)
{
	void *mailbox = NULL;
	u64 mtt_seg;
	struct mthca_mpt_entry *mpt_entry;
	u32 key, idx;
	u8 status;
	int list_len = mr->attr.max_pages;
	int err = -ENOMEM;
	int i;

	might_sleep();

	if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
		return -EINVAL;

	/* For Arbel, all MTTs must fit in the same page. */
	if (mthca_is_memfree(dev) &&
	    mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
		return -EINVAL;

	mr->maps = 0;

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;

	idx = key & (dev->limits.num_mpts - 1);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;

		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
		BUG_ON(!mr->mem.arbel.mpt);
	} else
		mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
			sizeof *(mr->mem.tavor.mpt) * idx;

	for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
	     i < list_len;
	     i <<= 1, ++mr->order)
		; /* nothing */

	mr->first_seg = mthca_alloc_mtt(dev, mr->order,
					dev->mr_table.fmr_mtt_buddy);
	if (mr->first_seg == -1)
		goto err_out_table;

	mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;

	if (mthca_is_memfree(dev)) {
		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
						      mr->first_seg);
		BUG_ON(!mr->mem.arbel.mtts);
	} else
		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

	mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out_free_mtt;

	mpt_entry = MAILBOX_ALIGN(mailbox);

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);

	mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	memset(&mpt_entry->start, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
	mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((u32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mpt_entry,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox_free;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	kfree(mailbox);
	return 0;

err_out_mailbox_free:
	kfree(mailbox);

err_out_free_mtt:
	mthca_free_mtt(dev, mr->first_seg, mr->order,
		       dev->mr_table.fmr_mtt_buddy);

err_out_table:
	if (mthca_is_memfree(dev))
		mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mthca_free_region(dev, fmr->ibmr.lkey, fmr->order, fmr->first_seg,
			  dev->mr_table.fmr_mtt_buddy);
	return 0;
}
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
				  int list_len, u64 iova)
{
	int i, page_mask;

	if (list_len > fmr->attr.max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->attr.page_size) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < list_len; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->attr.max_maps)
		return -EINVAL;

	return 0;
}
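/*
 * Remap protocol for both flavors: mark the MPT software-owned,
 * rewrite the page list and the key/length/start fields, then flip
 * the status byte back to hardware-owned.  The key is advanced by
 * num_mpts on each map so that accesses through a previous mapping's
 * key no longer match.
 */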
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	struct mthca_mpt_entry mpt_entry;
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

	for (i = 0; i < list_len; ++i) {
		__be64 mtt_entry = cpu_to_be64(page_list[i] |
					       MTHCA_MTT_FLAG_PRESENT);
		mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
	}

	mpt_entry.lkey   = cpu_to_be32(key);
	mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
	mpt_entry.start  = cpu_to_be64(iova);

	writel(mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
	memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
		    offsetof(struct mthca_mpt_entry, window_count) -
		    offsetof(struct mthca_mpt_entry, start));

	writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

	return 0;
}
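/*
 * The Arbel version writes the MPT and MTTs in regular memory (ICM)
 * rather than through MMIO, so explicit wmb() barriers order the
 * status byte updates against the entry contents.
 */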
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

	wmb();

	for (i = 0; i < list_len; ++i)
		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
						     MTHCA_MTT_FLAG_PRESENT);

	fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
	fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
	fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
	fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

	wmb();

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

	wmb();

	return 0;
}
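/*
 * Unmapping only rolls the key back to its base index and returns MPT
 * ownership to software; no flush command is issued here.
 */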
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	fmr->maps = 0;

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}
void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	fmr->maps = 0;

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
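/*
 * Set up the MR table: an index allocator for MPT entries plus buddy
 * allocator(s) for MTT segments.  When FMRs are in use, the FMR MTTs
 * are carved out of the start of the MTT table and managed by their
 * own buddy allocator so that regular MRs cannot claim FMR keys.
 */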
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
	int err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));
	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(dev->mr_table.mpt_base,
				(1 << i) * sizeof (struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(dev->mr_table.mtt_base,
				(1 << i) * MTHCA_MTT_SEG_SIZE);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}

		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i);
		if (err)
			goto err_fmr_mtt_buddy;

		/* Prevent regular MRs from using FMR keys */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i);
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* FMR table is always the first, take reserved MTTs out of there */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt(dev, i, dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				   dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}
void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}