/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"

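/*
 * MTT segments are handed out by a simple buddy allocator: bits[o] is a
 * bitmap of free blocks of order o (each block covering 2^o segments)
 * and num_free[o] counts them.  mlx4_buddy_alloc() takes the first free
 * block of sufficient order and splits it down to the requested order.
 */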
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
                if (!buddy->bits[i]) {
                        buddy->bits[i] = vzalloc(s * sizeof(long));
                        if (!buddy->bits[i])
                                goto err_out_free;
                }
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}

static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kvfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

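/*
 * Reserve a power-of-two run of MTT entries.  The buddy allocator works
 * in segments of (1 << log_mtts_per_seg) MTTs, so the requested order is
 * first reduced to a segment order, and the backing ICM pages for the
 * range are mapped before the MTT offset is returned.
 */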
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
        u32 seg;
        int seg_order;
        u32 offset;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);

        seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
        if (seg == -1)
                return -1;

        offset = seg * (1 << log_mtts_per_seg);

        if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
                                 offset + (1 << order) - 1)) {
                mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
                return -1;
        }

        return offset;
}

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
        u64 in_param = 0;
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, order);
                err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
                                   RES_OP_RESERVE_AND_MAP,
                                   MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A,
                                   MLX4_CMD_WRAPPED);
                if (err)
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_alloc_mtt_range(dev, order);
}

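/*
 * Initialize an MTT for npages pages: the allocation order is the
 * ceiling of log2(npages), and npages == 0 selects a "physical" MTT
 * (order -1) for which no translation entries are allocated.
 */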
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
                  struct mlx4_mtt *mtt)
{
        int i;

        if (!npages) {
                /* Cancel CX specific treatment of page sizes */
                mtt->order      = -1;
                mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
                return 0;
        } else
                mtt->page_shift = page_shift;

        for (mtt->order = 0, i = 1; i < npages; i <<= 1)
                ++mtt->order;

        mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
        if (mtt->offset == -1)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);

void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u32 first_seg;
        int seg_order;
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        seg_order = max_t(int, order - log_mtts_per_seg, 0);
        first_seg = offset / (1 << log_mtts_per_seg);

        mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
        mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
                             offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
        u64 in_param = 0;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, offset);
                set_param_h(&in_param, order);
                err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A,
                               MLX4_CMD_WRAPPED);
                if (err)
                        mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
                                  offset, order);
                return;
        }
        __mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        if (mtt->order < 0)
                return;

        mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
        return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

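/*
 * A memory key is simply the hardware MPT index rotated by one byte;
 * key_to_hw_index() is the inverse of hw_index_to_key().
 */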
static u32 hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd(dev, mailbox->dma, mpt_index,
                        0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
{
        return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
                            !mailbox, MLX4_CMD_HW2SW_MPT,
                            MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                       struct mlx4_mpt_entry ***mpt_entry)
{
        int err;
        int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
        struct mlx4_cmd_mailbox *mailbox = NULL;

        if (mmr->enabled != MLX4_MPT_EN_HW)
                return -EINVAL;

        err = mlx4_HW2SW_MPT(dev, NULL, key);
        if (err) {
                mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
                mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
                return err;
        }

        mmr->enabled = MLX4_MPT_EN_SW;

        if (!mlx4_is_mfunc(dev)) {
                **mpt_entry = mlx4_table_find(
                                &mlx4_priv(dev)->mr_table.dmpt_table,
                                key, NULL);
        } else {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);

                err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
                                   0, MLX4_CMD_QUERY_MPT,
                                   MLX4_CMD_TIME_CLASS_B,
                                   MLX4_CMD_WRAPPED);
                if (err)
                        goto free_mailbox;

                *mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
        }

        if (!(*mpt_entry) || !(**mpt_entry)) {
                err = -ENOMEM;
                goto free_mailbox;
        }

        return 0;

free_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);

int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                         struct mlx4_mpt_entry **mpt_entry)
{
        int err;

        if (!mlx4_is_mfunc(dev)) {
                /* Make sure any changes to this entry are flushed */
                wmb();

                *(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;

                /* Make sure the new status is written */
                wmb();

                err = mlx4_SYNC_TPT(dev);
        } else {
                int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);

                struct mlx4_cmd_mailbox *mailbox =
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);

                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }

        if (!err) {
                mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
                mmr->enabled = MLX4_MPT_EN_HW;
        }
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);

void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
                        struct mlx4_mpt_entry **mpt_entry)
{
        if (mlx4_is_mfunc(dev)) {
                struct mlx4_cmd_mailbox *mailbox =
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);
                mlx4_free_cmd_mailbox(dev, mailbox);
        }
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
                         u32 pdn)
{
        u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
        /* The wrapper function will put the slave's id here */
        if (mlx4_is_mfunc(dev))
                pd_flags &= ~MLX4_MPT_PD_VF_MASK;

        mpt_entry->pd_flags = cpu_to_be32(pd_flags |
                                          (pdn & MLX4_MPT_PD_MASK)
                                          | MLX4_MPT_PD_FLAG_EN_INV);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
                             struct mlx4_mpt_entry *mpt_entry,
                             u32 access)
{
        u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
                    (access & MLX4_PERM_MASK);

        mpt_entry->flags = cpu_to_be32(flags);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
                                  u64 iova, u64 size, u32 access, int npages,
                                  int page_shift, struct mlx4_mr *mr)
{
        mr->iova       = iova;
        mr->size       = size;
        mr->pd         = pd;
        mr->access     = access;
        mr->enabled    = MLX4_MPT_DISABLED;
        mr->key        = hw_index_to_key(mridx);

        return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
                          struct mlx4_cmd_mailbox *mailbox,
                          int num_entries)
{
        return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

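/*
 * MPT reservation and ICM mapping: in multi-function (SRIOV) mode these
 * requests go through the RES_MPT resource-tracker commands, otherwise
 * the __mlx4_mpt_* helpers touch the bitmap and ICM tables directly.
 */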
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
        u64 out_param;

        if (mlx4_is_mfunc(dev)) {
                if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
                                 MLX4_CMD_ALLOC_RES,
                                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        return -1;
                return get_param_l(&out_param);
        }
        return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
        u64 in_param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
                             MLX4_CMD_FREE_RES,
                             MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to release mr index:%d\n",
                                  index);
                return;
        }
        __mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
{
        u64 param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&param, index);
                return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
                                    MLX4_CMD_ALLOC_RES,
                                    MLX4_CMD_TIME_CLASS_A,
                                    MLX4_CMD_WRAPPED);
        }
        return __mlx4_mpt_alloc_icm(dev, index, gfp);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
        struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

        mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
        u64 in_param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, index);
                if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
                             MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                             MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
                                  index);
                return;
        }
        return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
                  int npages, int page_shift, struct mlx4_mr *mr)
{
        u32 index;
        int err;

        index = mlx4_mpt_reserve(dev);
        if (index == -1)
                return -ENOMEM;

        err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
                                     access, npages, page_shift, mr);
        if (err)
                mlx4_mpt_release(dev, index);

        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        int err;

        if (mr->enabled == MLX4_MPT_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mr->key) &
                                     (dev->caps.num_mpts - 1));
                if (err) {
                        mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
                                  err);
                        return err;
                }

                mr->enabled = MLX4_MPT_EN_SW;
        }
        mlx4_mtt_cleanup(dev, &mr->mtt);

        return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        int ret;

        ret = mlx4_mr_free_reserved(dev, mr);
        if (ret)
                return ret;
        if (mr->enabled)
                mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
        mlx4_mpt_release(dev, key_to_hw_index(mr->key));

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        mlx4_mtt_cleanup(dev, &mr->mtt);
        mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
                            u64 iova, u64 size, int npages,
                            int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
        int err;

        err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
        if (err)
                return err;

        mpt_entry->start       = cpu_to_be64(iova);
        mpt_entry->length      = cpu_to_be64(size);
        mpt_entry->entity_size = cpu_to_be32(page_shift);
        mpt_entry->flags    &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
                                             MLX4_MPT_FLAG_SW_OWNS));
        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
        } else {
                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
                                                  &mr->mtt));
                if (mr->mtt.page_shift == 0)
                        mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        }
        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
                /* fast register MR in free state */
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
        mr->enabled = MLX4_MPT_EN_SW;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);

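/*
 * mlx4_mr_enable() builds the MPT entry for a previously allocated MR
 * (flags, PD, iova, length, MTT address) and hands it to firmware with
 * SW2HW_MPT, after which the region can be used in work requests.
 */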
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;

        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
        if (err)
                return err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_table;
        }
        mpt_entry = mailbox->buf;
        mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO     |
                                       MLX4_MPT_FLAG_REGION  |
                                       mr->access);

        mpt_entry->key         = cpu_to_be32(key_to_hw_index(mr->key));
        mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
        mpt_entry->start       = cpu_to_be64(mr->iova);
        mpt_entry->length      = cpu_to_be64(mr->size);
        mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);

        if (mr->mtt.order < 0) {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
                mpt_entry->mtt_addr = 0;
        } else {
                mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
                                                  &mr->mtt));
        }

        if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
                /* fast register MR in free state */
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
                mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
        } else {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }

        err = mlx4_SW2HW_MPT(dev, mailbox,
                             key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
        if (err) {
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
        mr->enabled = MLX4_MPT_EN_HW;

        mlx4_free_cmd_mailbox(dev, mailbox);

        return 0;

err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
        mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);

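/*
 * Writing MTT entries: on the native/PF path the entries are written
 * directly into the ICM chunk that backs the MTT table (with the DMA
 * syncs below); in multi-function mode mlx4_write_mtt() instead sends
 * them to firmware in WRITE_MTT mailboxes, a chunk at a time.
 */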
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                                int start_index, int npages, u64 *page_list)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;

        mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
                               start_index, &dma_handle);

        if (!mtts)
                return -ENOMEM;

        dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
                                npages * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
                                   npages * sizeof (u64), DMA_TO_DEVICE);

        return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     int start_index, int npages, u64 *page_list)
{
        int err = 0;
        int chunk;
        int mtts_per_page;
        int max_mtts_first_page;

        /* compute how many mtts fit in the first page */
        mtts_per_page = PAGE_SIZE / sizeof(u64);
        max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
                              % mtts_per_page;

        chunk = min_t(int, max_mtts_first_page, npages);

        while (npages > 0) {
                err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
                if (err)
                        return err;
                npages      -= chunk;
                start_index += chunk;
                page_list   += chunk;

                chunk = min_t(int, mtts_per_page, npages);
        }
        return err;
}

int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   int start_index, int npages, u64 *page_list)
{
        struct mlx4_cmd_mailbox *mailbox = NULL;
        __be64 *inbox = NULL;
        int chunk;
        int err = 0;
        int i;

        if (mtt->order < 0)
                return -EINVAL;

        if (mlx4_is_mfunc(dev)) {
                mailbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);
                inbox = mailbox->buf;

                while (npages > 0) {
                        chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
                                      npages);
                        inbox[0] = cpu_to_be64(mtt->offset + start_index);
                        inbox[1] = 0;
                        for (i = 0; i < chunk; ++i)
                                inbox[i + 2] = cpu_to_be64(page_list[i] |
                                               MLX4_MTT_FLAG_PRESENT);
                        err = mlx4_WRITE_MTT(dev, mailbox, chunk);
                        if (err) {
                                mlx4_free_cmd_mailbox(dev, mailbox);
                                return err;
                        }

                        npages      -= chunk;
                        start_index += chunk;
                        page_list   += chunk;
                }
                mlx4_free_cmd_mailbox(dev, mailbox);
                return err;
        }

        return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);

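/*
 * Convenience wrapper that builds a page list from an mlx4_buf (direct
 * or paged) and writes it into the buffer's MTT.
 */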
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                       struct mlx4_buf *buf, gfp_t gfp)
{
        u64 *page_list;
        int err;
        int i;

        page_list = kmalloc(buf->npages * sizeof *page_list,
                            gfp);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < buf->npages; ++i)
                if (buf->nbufs == 1)
                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                else
                        page_list[i] = buf->page_list[i].map;

        err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);

        kfree(page_list);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);

int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
                  struct mlx4_mw *mw)
{
        u32 index;

        if ((type == MLX4_MW_TYPE_1 &&
             !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
             (type == MLX4_MW_TYPE_2 &&
             !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
                return -EOPNOTSUPP;

        index = mlx4_mpt_reserve(dev);
        if (index == -1)
                return -ENOMEM;

        mw->key     = hw_index_to_key(index);
        mw->pd      = pd;
        mw->type    = type;
        mw->enabled = MLX4_MPT_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);

int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mpt_entry *mpt_entry;
        int err;

        err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
        if (err)
                return err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_table;
        }
        mpt_entry = mailbox->buf;

        /* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
         * off, thus creating a memory window and not a memory region.
         */
        mpt_entry->key         = cpu_to_be32(key_to_hw_index(mw->key));
        mpt_entry->pd_flags    = cpu_to_be32(mw->pd);
        if (mw->type == MLX4_MW_TYPE_2) {
                mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
                mpt_entry->qpn       = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
        }

        err = mlx4_SW2HW_MPT(dev, mailbox,
                             key_to_hw_index(mw->key) &
                             (dev->caps.num_mpts - 1));
        if (err) {
                mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_cmd;
        }
        mw->enabled = MLX4_MPT_EN_HW;

        mlx4_free_cmd_mailbox(dev, mailbox);

        return 0;

err_cmd:
        mlx4_free_cmd_mailbox(dev, mailbox);

err_table:
        mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);

void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
        int err;

        if (mw->enabled == MLX4_MPT_EN_HW) {
                err = mlx4_HW2SW_MPT(dev, NULL,
                                     key_to_hw_index(mw->key) &
                                     (dev->caps.num_mpts - 1));
                if (err)
                        mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);

                mw->enabled = MLX4_MPT_EN_SW;
        }
        if (mw->enabled)
                mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
        mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

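/*
 * Set up the MR tables at driver init: the MPT index bitmap and the MTT
 * buddy allocator, plus a reservation covering the firmware-reserved
 * MTT entries.
 */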
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;
        int err;

        /* Nothing to do for slaves - all MR handling is forwarded
         * to the master */
        if (mlx4_is_slave(dev))
                return 0;

        if (!is_power_of_2(dev->caps.num_mpts))
                return -EINVAL;

        err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
                               ~0, dev->caps.reserved_mrws, 0);
        if (err)
                return err;

        err = mlx4_buddy_init(&mr_table->mtt_buddy,
                              ilog2((u32)dev->caps.num_mtts /
                              (1 << log_mtts_per_seg)));
        if (err)
                goto err_buddy;

        if (dev->caps.reserved_mtts) {
                priv->reserved_mtts =
                        mlx4_alloc_mtt_range(dev,
                                             fls(dev->caps.reserved_mtts - 1));
                if (priv->reserved_mtts < 0) {
                        mlx4_warn(dev, "MTT table of order %u is too small\n",
                                  mr_table->mtt_buddy.max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);

        return err;
}

void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_mr_table *mr_table = &priv->mr_table;

        if (mlx4_is_slave(dev))
                return;
        if (priv->reserved_mtts >= 0)
                mlx4_free_mtt_range(dev, priv->reserved_mtts,
                                    fls(dev->caps.reserved_mtts - 1));
        mlx4_buddy_cleanup(&mr_table->mtt_buddy);
        mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

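/*
 * Validate an FMR remap request: the page count, iova alignment and the
 * number of outstanding maps are checked before the MPT/MTTs are
 * rewritten in mlx4_map_phys_fmr().
 */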
static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
                                 int npages, u64 iova)
{
        int i, page_mask;

        if (npages > fmr->max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < npages; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->max_maps)
                return -EINVAL;

        return 0;
}

int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
                      int npages, u64 iova, u32 *lkey, u32 *rkey)
{
        u32 key;
        int i, err;

        err = mlx4_check_fmr(fmr, page_list, npages, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = key_to_hw_index(fmr->mr.key);
        key += dev->caps.num_mpts;
        *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;

        /* Make sure MPT status is visible before writing MTT entries */
        wmb();

        dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
                                npages * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; ++i)
                fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
                                   npages * sizeof(u64), DMA_TO_DEVICE);

        fmr->mpt->key    = cpu_to_be32(key);
        fmr->mpt->lkey   = cpu_to_be32(key);
        fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
        fmr->mpt->start  = cpu_to_be64(iova);

        /* Make sure MTT entries are visible before setting MPT status */
        wmb();

        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;

        /* Make sure MPT status is visible before consumer can use FMR */
        wmb();

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);

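/*
 * FMRs are ordinary MRs whose MPT and MTTs are kept mapped so that
 * mlx4_map_phys_fmr() can update them directly from the CPU, without a
 * firmware command per remap.
 */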
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
                   int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err = -ENOMEM;

        if (max_maps > dev->caps.max_fmr_maps)
                return -EINVAL;

        if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
                return -EINVAL;

        /* All MTTs must fit in the same page */
        if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
                return -EINVAL;

        fmr->page_shift = page_shift;
        fmr->max_pages  = max_pages;
        fmr->max_maps   = max_maps;
        fmr->maps       = 0;

        err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
                            page_shift, &fmr->mr);
        if (err)
                return err;

        fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
                                    fmr->mr.mtt.offset,
                                    &fmr->dma_handle);

        if (!fmr->mtts) {
                err = -ENOMEM;
                goto err_free;
        }

        return 0;

err_free:
        (void) mlx4_mr_free(dev, &fmr->mr);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);

int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        err = mlx4_mr_enable(dev, &fmr->mr);
        if (err)
                return err;

        fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
                                   key_to_hw_index(fmr->mr.key), NULL);
        if (!fmr->mpt)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_enable);

void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        if (!fmr->maps)
                return;

        fmr->maps = 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
                return;
        }

        err = mlx4_HW2SW_MPT(dev, NULL,
                             key_to_hw_index(fmr->mr.key) &
                             (dev->caps.num_mpts - 1));
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err) {
                pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
                return;
        }
        fmr->mr.enabled = MLX4_MPT_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);

int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
        int ret;

        if (fmr->maps)
                return -EBUSY;

        ret = mlx4_mr_free(dev, &fmr->mr);
        if (ret)
                return ret;
        fmr->mr.enabled = MLX4_MPT_DISABLED;

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);