/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#define MLX4_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE          (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO           (1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MLX4_MPT_FLAG_REGION        (1 <<  8)

#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
#define MLX4_MPT_PD_FLAG_RAE        (1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV     (3 << 24)

#define MLX4_MPT_STATUS_SW          0xF0
#define MLX4_MPT_STATUS_HW          0x00
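
/*
 * MTT segments are managed with a binary buddy allocator: bits[o]
 * tracks which blocks of 2^o segments are free and num_free[o] counts
 * them.  Allocating order o finds the smallest free block of at least
 * 2^o segments and splits it down, marking each split-off buddy
 * (seg ^ 1) free along the way.
 */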
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}
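
/*
 * Freeing walks back up the tree: as long as the buddy of the freed
 * block is itself free, the two are merged into one block of the next
 * higher order before the result is marked free.
 */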
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}
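
/*
 * The buddy allocator works in units of segments of 2^log_mtts_per_seg
 * MTT entries, while callers deal in MTT entries.  For example, with
 * log_mtts_per_seg = 3, a request of order 5 (32 entries) becomes a
 * buddy allocation of order 2 (4 segments), and the returned segment
 * number is scaled by 8 to yield the first MTT entry index.
 */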
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
        int seg_order;
        u32 offset;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);

        seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;

        offset = seg * (1 << log_mtts_per_seg);

        if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
                                 offset + (1 << order) - 1)) {
                mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }

        return offset;
}
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        u64 in_param;
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, order);
                err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
                                   RES_OP_RESERVE_AND_MAP,
                                   MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_WRAPPED);
                if (err)
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_alloc_mtt_range(dev, order);
}
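
/*
 * mlx4_mtt_init rounds npages up to the next power of two: mtt->order
 * is the smallest value with (1 << order) >= npages, so e.g. npages = 5
 * reserves a range of 8 MTT entries.  npages == 0 yields order -1,
 * which marks the MTT as unused (a physical region with no
 * translation table).
 */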
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
                  struct mlx4_mtt *mtt)
{
        int i;

        if (!npages) {
                mtt->order      = -1;
                mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
                return 0;
        }

        mtt->page_shift = page_shift;

        for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;

        mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
        if (mtt->offset == -1)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u32 first_seg;
        int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);
        first_seg = offset / (1 << log_mtts_per_seg);

        mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
        mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
                             offset + (1 << order) - 1);
}
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u64 in_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, offset);
                set_param_h(&in_param, order);
                err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A,
                               MLX4_CMD_WRAPPED);
                if (err)
                        mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
                                  offset, order);
                return;
        }
        __mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        if (mtt->order < 0)
                return;

        mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
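
/*
 * A memory key is the MPT index rotated left by 8 bits: the low byte
 * of the key acts as a tag that can change on every FMR remap (see
 * mlx4_map_phys_fmr) while still resolving to the same MPT entry,
 * which helps detect use of a stale key.
 */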
static u32 hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}
static u32 key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}
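
/*
 * SW2HW_MPT hands a software-built MPT entry to the firmware;
 * HW2SW_MPT takes ownership back (with a NULL mailbox the entry's
 * contents are discarded).  Both are issued wrapped, so that calls
 * from slave functions go through the master when SR-IOV is in use.
 */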
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd(dev, mailbox->dma, mpt_index,
                        0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
                            !mailbox, MLX4_CMD_HW2SW_MPT,
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
                          u32 *base_mridx)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 mridx;

        mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
        if (mridx == -1)
                return -ENOMEM;

        *base_mridx = mridx;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
                           u64 iova, u64 size, u32 access, int npages,
                           int page_shift, struct mlx4_mr *mr)
{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
        mr->enabled    = MLX4_MR_DISABLED;
        mr->key        = hw_index_to_key(mridx);

        return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
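
/*
 * WRITE_MTT writes up to num_entries MTT entries through a command
 * mailbox; the mailbox layout (start index in the first u64, entries
 * from the third u64 onward) is built by mlx4_write_mtt() below.
 */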
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
                          struct mlx4_cmd_mailbox *mailbox,
                          int num_entries)
{
        return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int __mlx4_mr_reserve(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mr_reserve(struct mlx4_dev *dev)
{
        u64 out_param;

        if (mlx4_is_mfunc(dev)) {
                if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
                                 MLX4_CMD_ALLOC_RES,
                                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_mr_reserve(dev);
}
void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
}
static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
{
        u64 in_param;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
                             MLX4_CMD_FREE_RES,
                             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to release mr index:%d\n",
                                  index);
                return;
        }
        __mlx4_mr_release(dev, index);
}
int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
{
        u64 param;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&param, index);
                return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
                                    MLX4_CMD_ALLOC_RES,
                                    MLX4_CMD_TIME_CLASS_A,
                                    MLX4_CMD_WRAPPED);
        }
        return __mlx4_mr_alloc_icm(dev, index);
}
void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
{
        u64 in_param;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
                             MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                             MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
                                  index);
                return;
        }
        return __mlx4_mr_free_icm(dev, index);
}
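
/*
 * MR creation is two-phase: mlx4_mr_alloc() only reserves an MPT index
 * and an MTT range in software; the region becomes usable once
 * mlx4_mr_enable() builds the MPT entry and hands it to the hardware
 * with SW2HW_MPT.
 */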
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
                  int npages, int page_shift, struct mlx4_mr *mr)
{
        u32 index;
        int err;

        index = mlx4_mr_reserve(dev);
        if (index == -1)
                return -ENOMEM;

        err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
                                     access, npages, page_shift, mr);
        if (err)
                mlx4_mr_release(dev, index);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        int err;

        if (mr->enabled == MLX4_MR_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
                        mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);

                mr->enabled = MLX4_MR_EN_SW;
        }
        mlx4_mtt_cleanup(dev, &mr->mtt);
}
EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        mlx4_mr_free_reserved(dev, mr);
        if (mr->enabled)
                mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
        mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;

        err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
        if (err)
                return err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_table;
        }
        mpt_entry = mailbox->buf;

        memset(mpt_entry, 0, sizeof *mpt_entry);

        mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO    |
                                       MLX4_MPT_FLAG_REGION |
                                       mr->access);

        mpt_entry->key         = cpu_to_be32(key_to_hw_index(mr->key));
        mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->start       = cpu_to_be64(mr->iova);
        mpt_entry->length      = cpu_to_be64(mr->size);
        mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
        } else {
                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
                                                  &mr->mtt));
        }

        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
                /* fast register MR in free state */
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
                mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }

        err = mlx4_SW2HW_MPT(dev, mailbox,
                             key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
        if (err) {
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
        mr->enabled = MLX4_MR_EN_HW;

        mlx4_free_cmd_mailbox(dev, mailbox);

        return 0;

err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
        mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
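
/*
 * Writes one chunk of MTT entries directly into the ICM table mapped
 * for this MTT range.  All entries of a chunk must fall within one ICM
 * page; the dma_sync calls bracket the CPU writes so the device sees
 * them.  __mlx4_write_mtt() below splits an arbitrary range into such
 * chunks.
 */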
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                                int start_index, int npages, u64 *page_list)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;

        mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
                               start_index, &dma_handle);

        if (!mtts)
                return -ENOMEM;

        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
                                npages * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
                                   npages * sizeof (u64), DMA_TO_DEVICE);

        return 0;
}
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     int start_index, int npages, u64 *page_list)
{
        int err = 0;
        int chunk;
        int mtts_per_page;
        int max_mtts_first_page;

        /* compute how many mtts fit in the first page */
        mtts_per_page = PAGE_SIZE / sizeof(u64);
        max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
                              % mtts_per_page;

        chunk = min_t(int, max_mtts_first_page, npages);

        while (npages > 0) {
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;

                chunk = min_t(int, mtts_per_page, npages);
        }

        return err;
}
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   int start_index, int npages, u64 *page_list)
{
        struct mlx4_cmd_mailbox *mailbox = NULL;
        __be64 *inbox = NULL;
        int chunk;
        int err = 0;
        int i;

        if (mtt->order < 0)
                return -EINVAL;

        if (mlx4_is_mfunc(dev)) {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);
                inbox = mailbox->buf;

                while (npages > 0) {
                        chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
                                      npages);
                        inbox[0] = cpu_to_be64(mtt->offset + start_index);
                        inbox[1] = 0;
                        for (i = 0; i < chunk; ++i)
                                inbox[i + 2] = cpu_to_be64(page_list[i] |
                                               MLX4_MTT_FLAG_PRESENT);
                        err = mlx4_WRITE_MTT(dev, mailbox, chunk);
                        if (err) {
                                mlx4_free_cmd_mailbox(dev, mailbox);
                                return err;
                        }

                        npages      -= chunk;
                        start_index += chunk;
                        page_list   += chunk;
                }
                mlx4_free_cmd_mailbox(dev, mailbox);
                return err;
        }

        return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
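
/*
 * Convenience wrapper that builds a page list from an mlx4_buf (which
 * is either one direct mapping or an array of per-page mappings) and
 * writes it into the buffer's MTT range.
 */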
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                       struct mlx4_buf *buf)
{
        u64 *page_list;
        int err;
        int i;

        page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < buf->npages; ++i)
                if (buf->nbufs == 1)
                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
                        page_list[i] = buf->page_list[i].map;

        err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

        kfree(page_list);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;

        if (!is_power_of_2(dev->caps.num_mpts))
                return -EINVAL;

        /* Nothing to do for slaves - all MR handling is forwarded
         * to the master */
        if (mlx4_is_slave(dev))
                return 0;

        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;

        err = mlx4_buddy_init(&mr_table->mtt_buddy,
                              ilog2(dev->caps.num_mtts /
                              (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;

        if (dev->caps.reserved_mtts) {
                priv->reserved_mtts =
                        mlx4_alloc_mtt_range(dev,
                                             fls(dev->caps.reserved_mtts - 1));
                if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %d is too small.\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

        return err;
}
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;

        if (mlx4_is_slave(dev))
                return;
        if (priv->reserved_mtts >= 0)
                mlx4_free_mtt_range(dev, priv->reserved_mtts,
                                    fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
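
/*
 * Sanity checks for an FMR remap request: the page count must fit the
 * FMR, the iova must be aligned to the FMR's page size, and the FMR
 * must not have exhausted its remap budget (max_maps).
 */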
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
                                 int npages, u64 iova)
{
        int i, page_mask;

        if (npages > fmr->max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < npages; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->max_maps)
                return -EINVAL;

        return 0;
}
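
/*
 * Remapping an FMR avoids a firmware command by flipping the MPT
 * ownership byte by hand: mark the entry SW-owned, rewrite the MTT
 * entries and the MPT fields with a freshly tagged key, then mark it
 * HW-owned again.  The memory barriers order each step against the
 * next so the HCA never sees a half-updated entry.
 */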
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
                      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
        u32 key;
        int i, err;

        err = mlx4_check_fmr(fmr, page_list, npages, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = key_to_hw_index(fmr->mr.key);
        key += dev->caps.num_mpts;
        *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

        /* Make sure MPT status is visible before writing MTT entries */
        wmb();

        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
                                npages * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
                                   npages * sizeof(u64), DMA_TO_DEVICE);

        fmr->mpt->key    = cpu_to_be32(key);
        fmr->mpt->lkey   = cpu_to_be32(key);
        fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
        fmr->mpt->start  = cpu_to_be64(iova);

        /* Make sure MTT entries are visible before setting MPT status */
        wmb();

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

        /* Make sure MPT status is visible before consumer can use FMR */
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
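
/*
 * An FMR is an ordinary MR whose MTT entries are kept in a single,
 * CPU-addressable ICM page (hence the max_pages * sizeof *fmr->mtts
 * <= PAGE_SIZE check), so that mlx4_map_phys_fmr() can rewrite them
 * without a command round trip.
 */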
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u64 mtt_offset;
        int err = -ENOMEM;

        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
                return -EINVAL;

        /* All MTTs must fit in the same page */
        if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
                return -EINVAL;

        fmr->page_shift = page_shift;
        fmr->max_pages  = max_pages;
        fmr->max_maps   = max_maps;
        fmr->maps = 0;

        err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
                            page_shift, &fmr->mr);
        if (err)
                return err;

        mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;

        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
                                    fmr->mr.mtt.offset,
                                    &fmr->dma_handle);
        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
        }

        return 0;

err_free:
        mlx4_mr_free(dev, &fmr->mr);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
                            u32 pd, u32 access, int max_pages,
                            int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err = -ENOMEM;

        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
                return -EINVAL;

        /* All MTTs must fit in the same page */
        if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
                return -EINVAL;

        fmr->page_shift = page_shift;
        fmr->max_pages  = max_pages;
        fmr->max_maps   = max_maps;
        fmr->maps = 0;

        err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
                                     page_shift, &fmr->mr);
        if (err)
                return err;

        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
                                    fmr->mr.mtt.offset,
                                    &fmr->dma_handle);
        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
        }

        return 0;

err_free:
        mlx4_mr_free_reserved(dev, &fmr->mr);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        err = mlx4_mr_enable(dev, &fmr->mr);
        if (err)
                return err;

        fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
                                   key_to_hw_index(fmr->mr.key), NULL);
        if (!fmr->mpt)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        if (!fmr->maps)
                return;

        fmr->maps = 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
                       err);
                return;
        }

        err = mlx4_HW2SW_MPT(dev, NULL,
                             key_to_hw_index(fmr->mr.key) &
                             (dev->caps.num_mpts - 1));
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err) {
                printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
                       err);
                return;
        }
        fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mlx4_mr_free(dev, &fmr->mr);
        fmr->mr.enabled = MLX4_MR_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mlx4_mr_free_reserved(dev, &fmr->mr);
        fmr->mr.enabled = MLX4_MR_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
                        MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);