/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_MPT_FLAG_SW_OWNS	    (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE	    (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO	    (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL	    (1 <<  9)
#define MLX4_MPT_FLAG_REGION	    (1 <<  8)

#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE	    (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV	    (3 << 24)

#define MLX4_MPT_STATUS_SW	0xF0
#define MLX4_MPT_STATUS_HW	0x00

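/*
 * MTT entries are handed out in segments from a binary buddy allocator:
 * buddy->bits[o] is a bitmap of free blocks of order o (each block spans
 * 2^o segments) and buddy->num_free[o] counts them.  Allocation takes the
 * smallest sufficient free block and splits it down to the requested
 * order; freeing coalesces a block with its buddy for as long as the
 * buddy is also free.
 */
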
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
	int o;
	int m;
	u32 seg;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
			if (seg < m)
				goto found;
		}

	spin_unlock(&buddy->lock);
	return -1;

 found:
	clear_bit(seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split the block down to the requested order. */
	while (o > order) {
		--o;
		seg <<= 1;
		set_bit(seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	seg <<= order;

	return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	/* Coalesce with the buddy block while it is also free. */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
	}

	/* Initially the whole table is one free block of maximum order. */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 seg;
	u32 offset;
	int seg_order;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);

	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	if (seg == -1)
		return -1;

	offset = seg * (1 << log_mtts_per_seg);

	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
		return -1;
	}

	return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
{
	int i;

	if (!npages) {
		/* A zero-length MTT describes a physical (direct) region. */
		mtt->order	= -1;
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		return 0;
	}

	mtt->page_shift = page_shift;

	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
		++mtt->order;

	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	u32 first_seg;
	int seg_order;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at %d, order %d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova    = iova;
	mr->size    = size;
	mr->pd	    = pd;
	mr->access  = access;
	mr->enabled = MLX4_MR_DISABLED;
	mr->key	    = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mr_reserve(dev);
}

void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}

static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mr_release(dev, index);
}

int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mr_alloc_icm(dev, index);
}

void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mr_free_icm(dev, index);
}

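/*
 * Typical use of the exported MR interface (illustrative sketch only,
 * not code taken from this file; the local variables are hypothetical):
 *
 *	struct mlx4_mr mr;
 *
 *	err = mlx4_mr_alloc(dev, pdn, iova, size, access, npages,
 *			    page_shift, &mr);
 *	if (!err)
 *		err = mlx4_write_mtt(dev, &mr.mtt, 0, npages, page_list);
 *	if (!err)
 *		err = mlx4_mr_enable(dev, &mr);
 *
 * and the region is torn down again with mlx4_mr_free().
 */
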
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mr_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mr_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MR_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

		mr->enabled = MLX4_MR_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);
}

void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mr_free_reserved(dev, mr);
	if (mr->enabled)
		mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	int err;

	err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
	if (err)
		return err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_table;
	}
	mpt_entry = mailbox->buf;

	memset(mpt_entry, 0, sizeof *mpt_entry);

	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO	 |
				       MLX4_MPT_FLAG_REGION	 |
				       mr->access);

	mpt_entry->key	       = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
	} else {
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
								&mr->mtt));
	}

	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
	} else {
		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	}

	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
	if (err) {
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
		goto err_cmd;
	}
	mr->enabled = MLX4_MR_EN_HW;

	mlx4_free_cmd_mailbox(dev, mailbox);

	return 0;

err_cmd:
	mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
	mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
				npages * sizeof (u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
				   npages * sizeof (u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
{
	int err = 0;
	int chunk;
	int mtts_per_page;
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
			      % mtts_per_page;

	chunk = min_t(int, max_mtts_first_page, npages);

	while (npages > 0) {
		err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
		if (err)
			return err;
		npages	    -= chunk;
		start_index += chunk;
		page_list   += chunk;

		chunk = min_t(int, mtts_per_page, npages);
	}

	return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	int chunk;
	int err = 0;
	int i;

	if (mtt->order < 0)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		while (npages > 0) {
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
				      npages);
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			inbox[1] = 0;
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
			if (err) {
				mlx4_free_cmd_mailbox(dev, mailbox);
				return err;
			}

			npages	    -= chunk;
			start_index += chunk;
			page_list   += chunk;
		}
		mlx4_free_cmd_mailbox(dev, mailbox);
		return err;
	}

	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
{
	u64 *page_list;
	int err;
	int i;

	page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i)
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;

	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_init_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	int err;

	if (!is_power_of_2(dev->caps.num_mpts))
		return -EINVAL;

	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	if (err)
		return err;

	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2(dev->caps.num_mtts /
				    (1 << log_mtts_per_seg)));
	if (err)
		goto err_buddy;

	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %d is too small.\n",
				  mr_table->mtt_buddy.max_order);
			err = -ENOMEM;
			goto err_reserve_mtts;
		}
	}

	return 0;

err_reserve_mtts:
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

	return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

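/*
 * FMRs (fast memory regions) allow cheap remapping of an existing MPT:
 * the MPT status byte is flipped to SW ownership, the cached MTT entries
 * are rewritten directly through the ICM mapping, the key is given a new
 * tag, and the MPT is handed back to HW - all without a firmware command
 * in the fast path.
 */
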
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
				 int npages, u64 iova)
{
	u64 page_mask;
	int i;

	if (npages > fmr->max_pages)
		return -EINVAL;

	page_mask = (1 << fmr->page_shift) - 1;

	/* We are getting page lists, so va must be page aligned. */
	if (iova & page_mask)
		return -EINVAL;

	/* Trust the user not to pass misaligned data in page_list */
	if (0)
		for (i = 0; i < npages; ++i) {
			if (page_list[i] & ~page_mask)
				return -EINVAL;
		}

	if (fmr->maps >= fmr->max_maps)
		return -EINVAL;

	return 0;
}

*dev
, struct mlx4_fmr
*fmr
, u64
*page_list
,
741 int npages
, u64 iova
, u32
*lkey
, u32
*rkey
)
746 err
= mlx4_check_fmr(fmr
, page_list
, npages
, iova
);
752 key
= key_to_hw_index(fmr
->mr
.key
);
753 key
+= dev
->caps
.num_mpts
;
754 *lkey
= *rkey
= fmr
->mr
.key
= hw_index_to_key(key
);
756 *(u8
*) fmr
->mpt
= MLX4_MPT_STATUS_SW
;
758 /* Make sure MPT status is visible before writing MTT entries */
761 dma_sync_single_for_cpu(&dev
->pdev
->dev
, fmr
->dma_handle
,
762 npages
* sizeof(u64
), DMA_TO_DEVICE
);
764 for (i
= 0; i
< npages
; ++i
)
765 fmr
->mtts
[i
] = cpu_to_be64(page_list
[i
] | MLX4_MTT_FLAG_PRESENT
);
767 dma_sync_single_for_device(&dev
->pdev
->dev
, fmr
->dma_handle
,
768 npages
* sizeof(u64
), DMA_TO_DEVICE
);
770 fmr
->mpt
->key
= cpu_to_be32(key
);
771 fmr
->mpt
->lkey
= cpu_to_be32(key
);
772 fmr
->mpt
->length
= cpu_to_be64(npages
* (1ull << fmr
->page_shift
));
773 fmr
->mpt
->start
= cpu_to_be64(iova
);
775 /* Make MTT entries are visible before setting MPT status */
778 *(u8
*) fmr
->mpt
= MLX4_MPT_STATUS_HW
;
780 /* Make sure MPT status is visible before consumer can use FMR */
785 EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr
);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
		   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 mtt_offset;
	int err = -ENOMEM;

	if (max_maps > dev->caps.max_fmr_maps)
		return -EINVAL;

	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
		return -EINVAL;

	/* All MTTs must fit in the same page */
	if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
		return -EINVAL;

	fmr->page_shift = page_shift;
	fmr->max_pages	= max_pages;
	fmr->max_maps	= max_maps;
	fmr->maps	= 0;

	err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
			    page_shift, &fmr->mr);
	if (err)
		return err;

	mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;

	fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
				    fmr->mr.mtt.offset,
				    &fmr->dma_handle);
	if (!fmr->mtts) {
		err = -ENOMEM;
		goto err_free;
	}

	return 0;

err_free:
	mlx4_mr_free(dev, &fmr->mr);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_mr_enable(dev, &fmr->mr);
	if (err)
		return err;

	fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
				   key_to_hw_index(fmr->mr.key), NULL);
	if (!fmr->mpt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
		    u32 *lkey, u32 *rkey)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	if (!fmr->maps)
		return;

	fmr->maps = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
		       err);
		return;
	}

	err = mlx4_HW2SW_MPT(dev, NULL,
			     key_to_hw_index(fmr->mr.key) &
			     (dev->caps.num_mpts - 1));
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err) {
		printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
		       err);
		return;
	}
	fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
	if (fmr->maps)
		return -EBUSY;

	mlx4_mr_free(dev, &fmr->mr);
	fmr->mr.enabled = MLX4_MR_DISABLED;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
			MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);