// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"
#define VALID_MR_FLAGS \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
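
/*
 * Map RDMA verbs access flags onto the GDMA MR access flags understood by
 * the hardware. Local read is always implied, so the translation starts
 * from GDMA_ACCESS_FLAG_LOCAL_READ and ORs in the requested capabilities.
 */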
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	return flags;
}
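
/*
 * Ask the GDMA layer to create a memory region. The request carries the PD
 * handle, the MR type and, for GVA-type MRs, the DMA region handle plus the
 * virtual address and access flags. On success the response supplies the
 * lkey/rkey pair and an opaque mr_handle used later for destruction.
 */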
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;

	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}
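
/*
 * Tear down a memory region previously created via GDMA_CREATE_MR,
 * identified by the mr_handle returned at creation time.
 */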
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}
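
/*
 * Register a user-space memory region: pin the user pages, create a DMA
 * region for them in the GDMA layer, then create a GVA-type MR on top of
 * that region. Each step is unwound in reverse order on failure.
 */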
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
		  start, iova, length, access_flags);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %d\n", err);
		goto err_free;
	}

	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "created dma region for user-mr 0x%llx\n", dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
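
/*
 * Deregister a user MR: destroy the hardware MR first (the DMA region is
 * tracked by the PF as part of the MR's lifecycle, so it needs no explicit
 * teardown here), then release the pinned user memory and free the
 * bookkeeping structure.
 */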
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}