/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"
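
/*
 * Memory region (MR) verbs for the vmw_pvrdma device. Each verb
 * builds a request in a pvrdma_cmd_req, posts it to the device and
 * mirrors the returned keys/handles into the ib_mr handed back to
 * the IB core.
 */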
/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;
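
	/*
	 * Hand the request to the device over the shared command
	 * channel; pvrdma_cmd_post() sleeps until the response (or a
	 * timeout) arrives.
	 */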
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret, npages;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}
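
	/* Pin the user pages and build a scatterlist for the region. */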
	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	npages = ib_umem_num_pages(umem);
	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;
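
	/*
	 * The page directory is a device-visible table of DMA page
	 * addresses that the device walks to translate this region.
	 */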
	ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;
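
	/* Describe the pinned region to the device. */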
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}
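
	/*
	 * mr->pages is filled in later by pvrdma_set_page() when the
	 * consumer maps a scatterlist through pvrdma_map_mr_sg().
	 */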
	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}

/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}
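
/*
 * pvrdma_set_page() is the per-page callback that pvrdma_map_mr_sg()
 * below hands to ib_sg_to_pages().
 */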
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}
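
/*
 * For reference, these verbs are wired into the device's ops table by
 * the driver core. The sketch below is an assumption about how that
 * hookup looks (the real table lives in pvrdma_main.c, carries many
 * more ops and varies by kernel version):
 *
 *	static const struct ib_device_ops pvrdma_dev_ops = {
 *		.alloc_mr	= pvrdma_alloc_mr,
 *		.dereg_mr	= pvrdma_dereg_mr,
 *		.get_dma_mr	= pvrdma_get_dma_mr,
 *		.map_mr_sg	= pvrdma_map_mr_sg,
 *		.reg_user_mr	= pvrdma_reg_user_mr,
 *	};
 *
 *	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);
 */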