/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
 */
#include <linux/slab.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
struct mthca_mtt {
	struct mthca_buddy *buddy;
	int                 order;
	u32                 first_seg;
};
/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
	__be32 flags;
	__be32 page_size;
	__be32 key;
	__be32 pd;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 window_count;
	__be32 window_count_limit;
	__be64 mtt_seg;
	__be32 mtt_sz;		/* Arbel only */
	u32    reserved[2];
} __attribute__((packed));
#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000
/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */
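/*
 * Illustrative example: with max_order = 3, bits[3] has 1 bit, bits[2]
 * has 2, bits[1] has 4 and bits[0] has 8; initially only bit 0 of
 * bits[3] is set (one free block of 8 segments).  An order-1 request
 * finds nothing free at orders 1 and 2, takes the order-3 block and
 * splits it down: the buddy halves left behind are marked free at
 * orders 2 and 1, and segment range 0..1 is returned.  Freeing later
 * recombines buddies whose bit is still set, one order at a time.
 */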
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		m = 1 << (buddy->max_order - o);
		seg = find_first_bit(buddy->bits[o], m);
		if (seg < m)
			goto found;
	}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);

	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);

	spin_unlock(&buddy->lock);
}
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	if (!buddy->bits)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i],
			    1 << (buddy->max_order - i));
	}

	set_bit(0, buddy->bits[buddy->max_order]);

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);

err_out:
	return -ENOMEM;
}
static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
}
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
				 struct mthca_buddy *buddy)
{
	u32 seg = mthca_buddy_alloc(buddy, order);

	if (seg == -1)
		return -1;

	if (mthca_is_memfree(dev))
		if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
					  seg + (1 << order) - 1)) {
			mthca_buddy_free(buddy, seg, order);
			seg = -1;
		}

	return seg;
}
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
					   struct mthca_buddy *buddy)
{
	struct mthca_mtt *mtt;
	int i;

	if (size <= 0)
		return ERR_PTR(-EINVAL);

	mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->buddy = buddy;
	mtt->order = 0;
	for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
		++mtt->order;

	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
	if (mtt->first_seg == -1) {
		kfree(mtt);
		return ERR_PTR(-ENOMEM);
	}

	return mtt;
}
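/*
 * Illustrative sizing note: "size" above is the number of 8-byte MTT
 * entries needed, and mtt->order counts how many times the basic
 * segment (MTHCA_MTT_SEG_SIZE / 8 entries) must be doubled to cover
 * it.  E.g. with a 64-byte segment (8 entries), a request for 20
 * entries yields order 2, i.e. four contiguous segments (32 entries).
 */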
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
	return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
	if (!mtt)
		return;

	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

	if (mthca_is_memfree(dev))
		mthca_table_put_range(dev, dev->mr_table.mtt_table,
				      mtt->first_seg,
				      mtt->first_seg + (1 << mtt->order) - 1);

	kfree(mtt);
}
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
			     int start_index, u64 *buffer_list, int list_len)
{
	struct mthca_mailbox *mailbox;
	__be64 *mtt_entry;
	int err = 0;
	u8 status;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mtt_entry = mailbox->buf;

	while (list_len > 0) {
		mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
					   mtt->first_seg * MTHCA_MTT_SEG_SIZE +
					   start_index * 8);
		mtt_entry[1] = 0;
		for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
			mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
						       MTHCA_MTT_FLAG_PRESENT);

		/*
		 * If we have an odd number of entries to write, add
		 * one more dummy entry for firmware efficiency.
		 */
		if (i & 1)
			mtt_entry[i + 2] = 0;

		err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
		if (err) {
			mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
			goto out;
		}
		if (status) {
			mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
				   status);
			err = -EINVAL;
			goto out;
		}

		list_len    -= i;
		start_index += i;
		buffer_list += i;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
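/*
 * Mailbox layout used above: mtt_entry[0] carries the target MTT
 * address within the MTT table, mtt_entry[1] is reserved, and the page
 * addresses follow from index 2 on, each tagged with
 * MTHCA_MTT_FLAG_PRESENT.  The count passed to WRITE_MTT is rounded up
 * to an even number, which is why an odd tail gets a zero dummy entry
 * appended.
 */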
int mthca_write_mtt_size(struct mthca_dev *dev)
{
	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		/*
		 * Be friendly to WRITE_MTT command
		 * and leave two empty slots for the
		 * index and reserved fields of the
		 * mailbox.
		 */
		return PAGE_SIZE / sizeof (u64) - 2;

	/* For Arbel, all MTTs must fit in the same page. */
	return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
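/*
 * Example with 4 KB pages: when the mailbox-based WRITE_MTT path is
 * used (FMR MTTs kept in a separate buddy, or FMR disabled), each
 * chunk holds 4096 / 8 - 2 = 510 entries, leaving room for the index
 * and reserved words.  When MTTs are written directly by the helpers
 * below, a mem-free (Arbel) device is limited to one page (512
 * entries) per chunk, while Tavor is effectively unlimited.
 */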
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	u64 __iomem *mtts;
	int i;

	mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
		start_index * sizeof (u64);
	for (i = 0; i < list_len; ++i)
		mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
				  mtts + i);
}
static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
				      struct mthca_mtt *mtt, int start_index,
				      u64 *buffer_list, int list_len)
{
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;
	int s = start_index * sizeof (u64);

	/* For Arbel, all MTTs must fit in the same page. */
	BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
	/* Require full segments */
	BUG_ON(s % MTHCA_MTT_SEG_SIZE);

	mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
				s / MTHCA_MTT_SEG_SIZE, &dma_handle);

	BUG_ON(!mtts);

	for (i = 0; i < list_len; ++i)
		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
}
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
		    int start_index, u64 *buffer_list, int list_len)
{
	int size = mthca_write_mtt_size(dev);
	int chunk;

	if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
	    !(dev->mthca_flags & MTHCA_FLAG_FMR))
		return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

	while (list_len > 0) {
		chunk = min(size, list_len);
		if (mthca_is_memfree(dev))
			mthca_arbel_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);
		else
			mthca_tavor_write_mtt_seg(dev, mtt, start_index,
						  buffer_list, chunk);

		list_len    -= chunk;
		start_index += chunk;
		buffer_list += chunk;
	}

	return 0;
}
static inline u32 tavor_hw_index_to_key(u32 ind)
{
	return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
	return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
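/*
 * Example: for lkey 0x12345678, arbel_key_to_hw_index() yields
 * 0x78123456 (the low byte of the key rotates into the top byte of the
 * hardware index), and arbel_hw_index_to_key() undoes the rotation.
 * Tavor uses the key value as the hardware MPT index unchanged.
 */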
static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
	if (mthca_is_memfree(dev))
		return arbel_hw_index_to_key(ind);
	else
		return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
	if (mthca_is_memfree(dev))
		return arbel_key_to_hw_index(key);
	else
		return tavor_key_to_hw_index(key);
}
static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		return ((key << 20) & 0x800000) | (key & 0x7fffff);
	else
		return key;
}
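/*
 * adjust_key() folds bit 3 of the allocated index into bit 23 of the
 * key and masks the rest to 23 bits; it only does so when the Sinai
 * "memory key throughput optimization" flag is set (see the debug
 * message in mthca_init_mr_table()), presumably to spread keys across
 * what the hardware treats as distinct regions.
 */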
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
	struct mthca_mailbox *mailbox;
	struct mthca_mpt_entry *mpt_entry;
	u32 key;
	int i;
	int err;
	u8 status;

	WARN_ON(buffer_size_shift >= 32);

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_table;
	}
	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);
	if (!mr->mtt)
		mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

	mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	mpt_entry->start     = cpu_to_be64(iova);
	mpt_entry->length    = cpu_to_be64(total_size);

	memset(&mpt_entry->lkey, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

	if (mr->mtt)
		mpt_entry->mtt_seg =
			cpu_to_be64(dev->mr_table.mtt_base +
				    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox;
	} else if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox;
	}

	mthca_free_mailbox(dev, mailbox);
	return err;

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
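/*
 * The "notrans" variant below creates a region with no address
 * translation: mr->mtt stays NULL, so mthca_mr_alloc() sets the
 * MTHCA_MPT_FLAG_PHYSICAL flag, and the region covers iova 0 with
 * length ~0ULL, i.e. the whole address space.
 */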
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
			   u32 access, struct mthca_mr *mr)
{
	mr->mtt = NULL;
	return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
			u64 *buffer_list, int buffer_size_shift,
			int list_len, u64 iova, u64 total_size,
			u32 access, struct mthca_mr *mr)
{
	int err;

	mr->mtt = mthca_alloc_mtt(dev, list_len);
	if (IS_ERR(mr->mtt))
		return PTR_ERR(mr->mtt);

	err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
	if (err) {
		mthca_free_mtt(dev, mr->mtt);
		return err;
	}

	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
			     total_size, access, mr);
	if (err)
		mthca_free_mtt(dev, mr->mtt);

	return err;
}
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
	mthca_table_put(dev, dev->mr_table.mpt_table,
			key_to_hw_index(dev, lkey));

	mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
	int err;
	u8 status;

	err = mthca_HW2SW_MPT(dev, NULL,
			      key_to_hw_index(dev, mr->ibmr.lkey) &
			      (dev->limits.num_mpts - 1),
			      &status);
	if (err)
		mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
			   status);

	mthca_free_region(dev, mr->ibmr.lkey);
	mthca_free_mtt(dev, mr->mtt);
}
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
		    u32 access, struct mthca_fmr *mr)
{
	struct mthca_mpt_entry *mpt_entry;
	struct mthca_mailbox *mailbox;
	u64 mtt_seg;
	u32 key, idx;
	u8 status;
	int err = -ENOMEM;
	int list_len = mr->attr.max_pages;
	int i;

	if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
		return -EINVAL;

	/* For Arbel, all MTTs must fit in the same page. */
	if (mthca_is_memfree(dev) &&
	    mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
		return -EINVAL;

	mr->maps = 0;

	key = mthca_alloc(&dev->mr_table.mpt_alloc);
	if (key == -1)
		return -ENOMEM;
	key = adjust_key(dev, key);

	idx = key & (dev->limits.num_mpts - 1);
	mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
		if (err)
			goto err_out_mpt_free;

		mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
		BUG_ON(!mr->mem.arbel.mpt);
	} else
		mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
			sizeof *(mr->mem.tavor.mpt) * idx;

	mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_out_table;
	}

	mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;

	if (mthca_is_memfree(dev)) {
		mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
						      mr->mtt->first_seg,
						      &mr->mem.arbel.dma_handle);
		BUG_ON(!mr->mem.arbel.mtts);
	} else
		mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_free_mtt;
	}

	mpt_entry = mailbox->buf;

	mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
				       MTHCA_MPT_FLAG_MIO         |
				       MTHCA_MPT_FLAG_REGION      |
				       access);

	mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
	mpt_entry->key       = cpu_to_be32(key);
	mpt_entry->pd        = cpu_to_be32(pd);
	memset(&mpt_entry->start, 0,
	       sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
	mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

	if (0) {
		mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
		for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	err = mthca_SW2HW_MPT(dev, mailbox,
			      key & (dev->limits.num_mpts - 1),
			      &status);
	if (err) {
		mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_out_mailbox_free;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_mailbox_free;
	}

	mthca_free_mailbox(dev, mailbox);
	return 0;

err_out_mailbox_free:
	mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_out_table:
	mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
	mthca_free(&dev->mr_table.mpt_alloc, key);
	return err;
}
int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mthca_free_region(dev, fmr->ibmr.lkey);
	mthca_free_mtt(dev, fmr->mtt);

	return 0;
}
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
				  int list_len, u64 iova)
{
	int i, page_mask;

	if (list_len > fmr->attr.max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->attr.page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < list_len; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->attr.max_maps)
		return -EINVAL;

	return 0;
}
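/*
 * Remapping scheme used by the map_phys_fmr functions below: each
 * remap keeps the MPT index (the key modulo the number of MPTs) but
 * advances the upper bits of the key, either by num_mpts or, with the
 * Sinai optimization, by SINAI_FMR_KEY_INC.  This presumably
 * invalidates lkeys/rkeys handed out for a previous mapping while
 * reusing the same MPT entry; the unmap functions mask the key back
 * down to the bare index.
 */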
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	struct mthca_mpt_entry mpt_entry;
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

	for (i = 0; i < list_len; ++i) {
		__be64 mtt_entry = cpu_to_be64(page_list[i] |
					       MTHCA_MTT_FLAG_PRESENT);
		mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
	}

	mpt_entry.lkey   = cpu_to_be32(key);
	mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	mpt_entry.start  = cpu_to_be64(iova);

	__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
	memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
		    offsetof(struct mthca_mpt_entry, window_count) -
		    offsetof(struct mthca_mpt_entry, start));

	writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

	return 0;
}
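/*
 * The Arbel variant below does the same dance as the Tavor one above,
 * but the MPT and MTT entries live in host memory found via
 * mthca_table_find() rather than behind an ioremap()ed BAR, so the
 * MTTs are written directly and pushed out with dma_sync_single(), and
 * the ownership byte is toggled with a plain store instead of
 * writeb().
 */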
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			     int list_len, u64 iova)
{
	struct mthca_fmr *fmr = to_mfmr(ibfmr);
	struct mthca_dev *dev = to_mdev(ibfmr->device);
	u32 key;
	int i, err;

	err = mthca_check_fmr(fmr, page_list, list_len, iova);
	if (err)
		return err;

	++fmr->maps;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		key += SINAI_FMR_KEY_INC;
	else
		key += dev->limits.num_mpts;
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

	wmb();

	for (i = 0; i < list_len; ++i)
		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
						     MTHCA_MTT_FLAG_PRESENT);

	dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
			list_len * sizeof(u64), DMA_TO_DEVICE);

	fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
	fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
	fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
	fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

	wmb();

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

	wmb();

	return 0;
}
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

	fmr->maps = 0;

	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}
void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	u32 key;

	if (!fmr->maps)
		return;

	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
	key &= dev->limits.num_mpts - 1;
	key = adjust_key(dev, key);
	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

	fmr->maps = 0;

	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
int mthca_init_mr_table(struct mthca_dev *dev)
{
	unsigned long addr;
	int mpts, mtts, err, i;

	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
			       dev->limits.num_mpts,
			       ~0, dev->limits.reserved_mrws);
	if (err)
		return err;

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
		dev->limits.fmr_reserved_mtts = 0;
	else
		dev->mthca_flags |= MTHCA_FLAG_FMR;

	if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mthca_dbg(dev, "Memory key throughput optimization activated.\n");

	err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
			       fls(dev->limits.num_mtt_segs - 1));
	if (err)
		goto err_mtt_buddy;

	dev->mr_table.tavor_fmr.mpt_base = NULL;
	dev->mr_table.tavor_fmr.mtt_base = NULL;

	if (dev->limits.fmr_reserved_mtts) {
		i = fls(dev->limits.fmr_reserved_mtts - 1);

		if (i >= 31) {
			mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
			err = -EINVAL;
			goto err_fmr_mpt;
		}

		mpts = mtts = 1 << i;
	} else {
		mtts = dev->limits.num_mtt_segs;
		mpts = dev->limits.num_mpts;
	}

	if (!mthca_is_memfree(dev) &&
	    (dev->mthca_flags & MTHCA_FLAG_FMR)) {

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mpt_base);

		dev->mr_table.tavor_fmr.mpt_base =
			ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

		if (!dev->mr_table.tavor_fmr.mpt_base) {
			mthca_warn(dev, "MPT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mpt;
		}

		addr = pci_resource_start(dev->pdev, 4) +
			((pci_resource_len(dev->pdev, 4) - 1) &
			 dev->mr_table.mtt_base);

		dev->mr_table.tavor_fmr.mtt_base =
			ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
		if (!dev->mr_table.tavor_fmr.mtt_base) {
			mthca_warn(dev, "MTT ioremap for FMR failed.\n");
			err = -ENOMEM;
			goto err_fmr_mtt;
		}
	}

	if (dev->limits.fmr_reserved_mtts) {
		err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_fmr_mtt_buddy;

		/* Prevent regular MRs from using FMR keys */
		err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
		if (err)
			goto err_reserve_fmr;

		dev->mr_table.fmr_mtt_buddy =
			&dev->mr_table.tavor_fmr.mtt_buddy;
	} else
		dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

	/* FMR table is always the first, take reserved MTTs out of there */
	if (dev->limits.reserved_mtts) {
		i = fls(dev->limits.reserved_mtts - 1);

		if (mthca_alloc_mtt_range(dev, i,
					  dev->mr_table.fmr_mtt_buddy) == -1) {
			mthca_warn(dev, "MTT table of order %d is too small.\n",
				   dev->mr_table.fmr_mtt_buddy->max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
err_reserve_fmr:
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

	return err;
}
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
	/* XXX check if any MRs are still allocated? */
	if (dev->limits.fmr_reserved_mtts)
		mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

	mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

	if (dev->mr_table.tavor_fmr.mtt_base)
		iounmap(dev->mr_table.tavor_fmr.mtt_base);
	if (dev->mr_table.tavor_fmr.mpt_base)
		iounmap(dev->mr_table.tavor_fmr.mpt_base);

	mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}