/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
static void
create_mkey_callback(int status, struct mlx5_async_work *context);

static void
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	mkey->key = key;
}
static int
mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in, int inlen)
{
	assign_mkey_variant(dev, mkey, in);
	return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
}
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
		       struct mlx5_core_mkey *mkey,
		       struct mlx5_async_ctx *async_ctx,
		       u32 *in, int inlen, u32 *out, int outlen,
		       struct mlx5_async_work *context)
{
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, mkey, in);
	return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
				create_mkey_callback, context);
}
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_cache_ent *ent = mr->cache_ent;
	unsigned long flags;

	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		spin_lock_irqsave(&ent->lock, flags);
		ent->pending--;
		WRITE_ONCE(dev->fill_delay, 1);
		spin_unlock_irqrestore(&ent->lock, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.key |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mr->out, mkey_index));

	WRITE_ONCE(dev->cache.last_add, jiffies);

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	ent->total_mrs++;
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
}
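
/*
 * Prepare an MR structure and the common mkey context fields shared by all
 * entries of a cache bucket; the caller issues the actual CREATE_MKEY.
 */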
static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return NULL;
	mr->order = ent->order;
	mr->cache_ent = ent;
	mr->dev = ent->dev;

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
	MLX5_SET(mkc, mkc, log_page_size, ent->page);
	return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		mr = alloc_cache_mr(ent, mkc);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		spin_lock_irq(&ent->lock);
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			spin_unlock_irq(&ent->lock);
			kfree(mr);
			break;
		}
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
					     &ent->dev->async_ctx, in, inlen,
					     mr->out, sizeof(mr->out),
					     &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
/* Synchronously create a MR in the cache */
static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	mr = alloc_cache_mr(ent, mkc);
	if (!mr) {
		err = -ENOMEM;
		goto free_in;
	}

	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto free_mr;

	mr->mmkey.type = MLX5_MKEY_MR;
	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
	spin_lock_irq(&ent->lock);
	ent->total_mrs++;
	spin_unlock_irq(&ent->lock);
	kfree(in);
	return mr;

free_mr:
	kfree(mr);
free_in:
	kfree(in);
	return ERR_PTR(err);
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	lockdep_assert_held(&ent->lock);
	if (list_empty(&ent->head))
		return;
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	ent->available_mrs--;
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
	kfree(mr);
	spin_lock_irq(&ent->lock);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
{
	int err;

	lockdep_assert_held(&ent->lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->available_mrs + ent->pending)
			return 0;
		if (target > ent->available_mrs + ent->pending) {
			u32 todo = target - (ent->available_mrs + ent->pending);

			spin_unlock_irq(&ent->lock);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			spin_lock_irq(&ent->lock);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}
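
/*
 * debugfs interface: "size" exposes and resizes the total number of MRs in a
 * bucket, "limit" sets the low water mark that triggers background refill.
 */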
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests, however we
	 * cannot free MRs that are in use. Compute the target value for
	 * available_mrs.
	 */
	spin_lock_irq(&ent->lock);
	if (target < ent->total_mrs - ent->available_mrs) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - (ent->total_mrs - ent->available_mrs);
	if (target < ent->limit || target > ent->limit*2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	spin_unlock_irq(&ent->lock);

	return count;

err_unlock:
	spin_unlock_irq(&ent->lock);
	return err;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to high water mark implied by
	 * the limit.
	 */
	spin_lock_irq(&ent->lock);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	spin_unlock_irq(&ent->lock);
	if (err)
		return err;
	return count;
}
static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static bool someone_adding(struct mlx5_mr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];
		bool ret;

		spin_lock_irq(&ent->lock);
		ret = ent->available_mrs < ent->limit;
		spin_unlock_irq(&ent->lock);
		if (ret)
			return true;
	}
	return false;
}
/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis, once the low water mark is hit it is
 * refilled up to the high mark.
 */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
		return;
	if (ent->available_mrs < ent->limit) {
		ent->fill_to_high_water = true;
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}
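
/*
 * Workqueue handler that brings a bucket back between its water marks:
 * it adds keys one at a time while below the high mark and garbage-collects
 * excess MRs once the bucket has been over-full for a while.
 */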
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int err;

	spin_lock_irq(&ent->lock);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if pending is positive, so we
			 * will be rescheduled from reg_mr_callback(). The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(dev,
					     "command failed order %d, err %d\n",
					     ent->order, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr() logic is performed as garbage
		 * collection task. Such task is intended to be run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_cache_mr() and postpone
		 * the garbage collection work to try to run in next cycle, in
		 * order to free CPU resources to other tasks.
		 */
		spin_unlock_irq(&ent->lock);
		need_delay = need_resched() || someone_adding(cache) ||
			     time_after(jiffies,
					READ_ONCE(cache->last_add) + 300 * HZ);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (need_delay)
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	spin_unlock_irq(&ent->lock);
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;

	if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
		    entry >= ARRAY_SIZE(cache->ent)))
		return ERR_PTR(-EINVAL);

	ent = &cache->ent[entry];
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	} else {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->available_mrs--;
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}
	return mr;
}
/* Return a MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
	struct mlx5_ib_dev *dev = req_ent->dev;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent = req_ent;

	/* Try larger MR pools from the cache to satisfy the allocation */
	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
			    ent - dev->cache.ent);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->available_mrs--;
			queue_adjust_cache_locked(ent);
			spin_unlock_irq(&ent->lock);
			break;
		}
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	if (!mr)
		req_ent->miss++;

	return mr;
}
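
/* Permanently remove an MR from the accounting of its cache bucket. */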
static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	mr->cache_ent = NULL;
	spin_lock_irq(&ent->lock);
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
}
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	if (!ent)
		return;

	if (mlx5_mr_cache_invalidate(mr)) {
		detach_mr_from_cache(mr);
		destroy_mkey(dev, mr);
		return;
	}

	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	queue_adjust_cache_locked(ent);
	spin_unlock_irq(&ent->lock);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->available_mrs--;
		ent->total_mrs--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}
static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}
static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}
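
/*
 * Build the MR cache: one ordered workqueue plus one bucket per supported
 * order. Standard buckets hold MTT-based mkeys sized by the driver profile;
 * the entries above MR_CACHE_LAST_STD_ENTRY are initialized for ODP use.
 */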
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		spin_lock_irq(&ent->lock);
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	if (!dev->cache.wq)
		return 0;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &dev->cache.ent[i];

		spin_lock_irq(&ent->lock);
		ent->disabled = true;
		spin_unlock_irq(&ent->lock);
		cancel_work_sync(&ent->work);
		cancel_delayed_work_sync(&ent->dwork);
	}

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
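
/* Translate IB access flags into mkey context access bits and bind the PD. */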
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}
static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}
static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem, int *npages,
		       int *page_shift, int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
				      &mlx5_mn_ops);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}
static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
						      unsigned int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return &cache->ent[0];
	order = order - cache->ent[0].order;
	if (order > MR_CACHE_LAST_STD_ENTRY)
		return NULL;
	return &cache->ent[order];
}
static struct mlx5_ib_mr *
alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
		    u64 len, int npages, int page_shift, unsigned int order,
		    int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
	struct mlx5_ib_mr *mr;

	if (!ent)
		return ERR_PTR(-E2BIG);
	mr = get_cache_mr(ent);
	if (!mr) {
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
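
/*
 * Post UMR work requests that (re)write the translation table (XLT) of an
 * existing mkey. The table is staged in a DMA-mapped bounce buffer and sent
 * in chunks of at most MLX5_MAX_UMR_CHUNK bytes, falling back to a smaller
 * spare buffer or the emergency page when memory is tight.
 */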
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	size_t size_to_map = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation od %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	if (mr->umem->is_odp) {
		if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
			struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
			size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

			pages_to_map = min_t(size_t, pages_to_map, max_pages);
		}
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		if (mr->umem->is_odp) {
			mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		} else {
			__mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
					       npages, xlt,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages
			 * brought from the umem.
			 */
			memset(xlt + size_to_map, 0, size - size_to_map);
		}
		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate)
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}
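
/*
 * Register a user memory region: pin (or ODP-map) the umem, then prefer a
 * UMR-updated mkey from the cache and fall back to a blocking CREATE_MKEY
 * through reg_create() when the cache cannot serve the request.
 */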
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool use_umr;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (virt_addr != start)
			return ERR_PTR(-EINVAL);
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	use_umr = mlx5_ib_can_use_umr(dev, true, access_flags);

	if (order <= mr_cache_max_order(dev) && use_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (is_odp_mr(mr)) {
		to_ib_umem_odp(mr->umem)->private = mr;
		init_waitqueue_head(&mr->q_deferred_work);
		atomic_set(&mr->num_deferred_work, 0);
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
				      GFP_KERNEL));
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
/**
 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA inprogress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
	struct mlx5_umr_wr umrwr = {};

	if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.pd = mr->dev->umrc.pd;
	umrwr.mkey = mr->mmkey.key;
	umrwr.ignore_free_state = 1;

	return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}
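
/*
 * Re-register an existing user MR. Depending on which attributes change,
 * this either updates the mkey in place with UMR or destroys it and creates
 * a replacement via reg_create().
 */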
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (!mlx5_ib_can_use_umr(dev, true, access_flags) ||
	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->cache_ent)
			detach_mr_from_cache(mr);
		err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

	return 0;

err:
	ib_umem_release(mr->umem);
	mr->umem = NULL;

	clean_mr(dev, mr);
	return err;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
		kfree(mr->sig);
		mr->sig = NULL;
	}

	if (!mr->cache_ent) {
		destroy_mkey(dev, mr);
		mlx5_free_priv_descs(mr);
	}
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	/* Stop all DMA */
	if (is_odp_mr(mr))
		mlx5_ib_fence_odp_mr(mr);
	else
		clean_mr(dev, mr);

	if (mr->cache_ent)
		mlx5_mr_cache_free(dev, mr);
	else
		kfree(mr);

	ib_umem_release(umem);
	atomic_sub(npages, &dev->mdev->priv.reg_pages);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
	}

	if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
		mlx5_ib_free_implicit_mr(mmr);
		return 0;
	}

	dereg_mr(to_mdev(ibmr->device), mmr);

	return 0;
}
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}
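
/* Allocate a memory window backed by a KLM-mode, UMR-enabled mkey. */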
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
				      GFP_KERNEL));
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return &mw->ibmw;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
		/*
		 * pagefault_single_data_segment() may be accessing mmw under
		 * SRCU if the user bound an ODP MR to this MW.
		 */
		synchronize_srcu(&dev->odp_srcu);
	}

	err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
	if (err)
		return err;
	kfree(mmw);
	return 0;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
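
/*
 * Map data and protection-information scatterlists onto the internal MTT
 * pi_mr so that both can be covered by a single contiguous mapping.
 */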
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals to first data page address + size of data pages +
		 * metadata offset at the first metadata page
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we register
		 * also the gaps between the end of the data and the start of
		 * the metadata (the sig MR will verify that the HW will access
		 * to right addresses). This mapping is safe because we use
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fallback to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
	 * descriptors and fallback to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially in high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is zero-based memory region */
	if (pi_mr) {
		ibmr->iova = 0;
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	} else {
		ibmr->iova = mr->data_iova;
		ibmr->sig_attrs->meta_length = mr->meta_length;
	}

	mr->pi_mr = pi_mr;
	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}