/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
enum {
	MAX_PENDING_REG_MR = 8,
};
#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif
static int clean_mr(struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
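/*
 * The MR cache is an array of entries; entry i holds pre-created, free
 * mkeys sized for 2^ent[i].order pages, so a registration order maps to
 * a cache index simply by subtracting the smallest cached order.
 */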
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization have finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
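/*
 * Completion callback for the asynchronous CREATE_MKEY commands issued
 * by add_keys(): it assembles the final mkey value (hardware index plus
 * a per-device rotating key byte), adds the new MR to its cache bucket
 * and publishes the mkey in the device's mkey radix tree.
 */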
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}
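/*
 * Pre-populate cache bucket c with num free mkeys. Creation is done
 * asynchronously (mlx5_core_create_mkey_cb() completing in
 * reg_mr_callback()), and no more than MAX_PENDING_REG_MR commands are
 * kept in flight per bucket; -EAGAIN is returned once that budget is
 * exhausted.
 */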
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, (npages + 1) / 2);
		MLX5_SET(mkc, mkc, log_page_size, 12);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
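/*
 * debugfs knobs: each cache bucket exposes writable "size" and "limit"
 * files (the handlers below) plus read-only "cur" and "miss" counters,
 * created under an mr_cache/<order>/ directory below the device's mlx5
 * debugfs root, so the cache can be inspected and resized at runtime.
 */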
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}
static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}
static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
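/*
 * Cache maintenance: each bucket is refilled (one mkey at a time) while
 * it holds fewer than 2 * limit entries, and shrunk back towards its
 * limit once it has grown beyond 2 * limit and has been idle for a
 * while; see the garbage-collection comment inside the function.
 */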
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as garbage collection
		 * task. Such task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}
static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
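/*
 * Take a pre-created MR from the cache. If the exact-order bucket is
 * empty, the search falls through to larger orders; every bucket that
 * is touched gets its refill work queued, and a complete miss is
 * accounted in the original bucket's miss counter.
 */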
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}
static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    (mlx5_core_is_pf(dev->mdev)))
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0 ; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
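/*
 * ib_get_dma_mr() entry point: create an mkey in PA access mode that
 * covers the whole address space (length64 set, start_addr 0), i.e. the
 * "all physical memory" MR used for local DMA access.
 */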
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}
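/*
 * Build the page-address (pas) array for a umem and DMA-map it for a
 * UMR WQE. The buffer is over-allocated so the pas pointer can be
 * aligned to MLX5_UMR_ALIGN and padded up to MLX5_UMR_MTT_ALIGNMENT as
 * the hardware requires; the caller owns *mr_pas and must kfree() it
 * after unmapping *dma.
 */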
static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}
static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}
static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}
static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}
static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}
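/*
 * Fast-path registration: take a free mkey from the cache, build the
 * pas array, and configure the mkey (address, length, PD, access) with
 * a UMR work request posted on the driver's internal UMR QP, waiting
 * synchronously for its completion.
 */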
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		mlx5_ib_init_umr_context(&umr_context);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_cqe = &umr_context.cqe;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
		sizeof(*pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(virt_addr, length, 1 << page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
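/*
 * ib_reg_user_mr() entry point: pin the user range, then either take
 * the UMR fast path (cached mkey, order small enough) or fall back to
 * a blocking CREATE_MKEY through reg_create() under slow_path_mutex.
 */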
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);

	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	int err;

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	mlx5_ib_init_umr_context(&umr_context);

	umrwr.wr.wr_cqe = &umr_context.cqe;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}
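/*
 * ib_rereg_user_mr() entry point: unless only the PD changes, the umem
 * is re-pinned; if the new translation still fits the existing cached
 * mkey a UMR update is sent, otherwise the mkey is torn down and
 * recreated through reg_create().
 */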
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fileds(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}
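/*
 * ib_alloc_mr() entry point: allocate an mkey for fast registration.
 * IB_MR_TYPE_MEM_REG uses an MTT descriptor list, IB_MR_TYPE_SG_GAPS
 * uses KLMs, and IB_MR_TYPE_SIGNATURE additionally creates the memory
 * and wire PSVs needed for signature offload.
 */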
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->ibmw.rkey = mw->mmkey.key;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
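/*
 * Translate a scatterlist into the MR's KLM descriptor array (used for
 * the KLM-based MR types), honouring an optional first-entry offset and
 * accumulating the total mapped length in mr->ibmr.length.
 */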
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;
	mr->ndescs = sg_nents;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}