/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"
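
/*
 * Resolve a completion-channel fd to its event file.  An extra uobject
 * reference is taken before the read lock is dropped, so the caller owns
 * a reference on the returned file and is responsible for putting it.
 */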
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(UVERBS_OBJECT_COMP_CHANNEL,
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	struct ib_rdmacg_object cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(UVERBS_OBJECT_PD, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_PD, cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
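
/*
 * XRC domains opened against the same inode must resolve to the same
 * ib_xrcd, so each uverbs device keeps an rb-tree of inode -> xrcd
 * mappings, protected by xrcd_tree_mutex.
 */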
struct xrcd_table_entry {
	struct rb_node node;
	struct ib_xrcd *xrcd;
	struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD,
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_XRCD, cmd.xrcd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(UVERBS_OBJECT_MR, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->dm = NULL;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	mr->res.type = RDMA_RESTRACK_MR;
	rdma_restrack_add(&mr->res);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (mr->dm) {
		ret = -EINVAL;
		goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_MW, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device = pd->device;
	mw->pd = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_MW, cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct ib_uobject *uobj;
	struct ib_uverbs_completion_event_file *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
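
/*
 * Common helper shared by the legacy create_cq and extended ex_create_cq
 * paths; the per-path response copy is supplied through the @cb callback.
 */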
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ,
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle = cmd->user_handle;
	obj->uverbs_file = file;
	obj->comp_events_reported = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device = ib_dev;
	cq->uobject = &obj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_ex_create_cq cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata ucore;
	struct ib_udata uhw;
	struct ib_ucq_object *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq cmd;
	struct ib_ucq_object *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}
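
/*
 * Translate a kernel ib_wc into the fixed-layout ib_uverbs_wc expected
 * by userspace, converting OPA extended LIDs where the port uses them.
 */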
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id = wc->wr_id;
	tmp.status = wc->status;
	tmp.opcode = wc->opcode;
	tmp.vendor_err = wc->vendor_err;
	tmp.byte_len = wc->byte_len;
	tmp.ex.imm_data = wc->ex.imm_data;
	tmp.qp_num = wc->qp->qp_num;
	tmp.src_qp = wc->src_qp;
	tmp.wc_flags = wc->wc_flags;
	tmp.pkey_index = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid = ib_lid_cpu16(wc->slid);
	tmp.sl = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num = wc->port_num;
	tmp.reserved = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(UVERBS_OBJECT_CQ, cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
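
/*
 * Common helper shared by the legacy create_qp and extended ex_create_qp
 * paths.  The uobject read references taken here are dropped again before
 * returning, on both the success and the error paths.
 */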
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	char *buf;
	struct ib_qp_init_attr attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		(cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					     IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd->qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd->max_send_wr;
	attr.cap.max_recv_wr = cmd->max_recv_wr;
	attr.cap.max_send_sge = cmd->max_send_sge;
	attr.cap.max_recv_sge = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp = qp;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->rwq_ind_tbl = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn = qp->qp_num;
	resp.base.qp_handle = obj->uevent.uobject.id;
	resp.base.max_recv_sge = attr.cap.max_recv_sge;
	resp.base.max_send_sge = attr.cap.max_send_sge;
	resp.base.max_recv_wr = attr.cap.max_recv_wr;
	resp.base.max_send_wr = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata ucore;
	struct ib_udata uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_xrcd *xrcd;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_qp *qp;
	struct ib_qp_open_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP,
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.qp_num = cmd.qpn;
	attr.qp_type = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
				   IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label = grh->flow_label;
		uverb_attr->sgid_index = grh->sgid_index;
		uverb_attr->hop_limit = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
	    cmd->base.cur_qp_state > IB_QPS_ERR) ||
	    cmd->base.qp_state > IB_QPS_ERR) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state = cmd->base.qp_state;
	attr->cur_qp_state = cmd->base.cur_qp_state;
	attr->path_mtu = cmd->base.path_mtu;
	attr->path_mig_state = cmd->base.path_mig_state;
	attr->qkey = cmd->base.qkey;
	attr->rq_psn = cmd->base.rq_psn;
	attr->sq_psn = cmd->base.sq_psn;
	attr->dest_qp_num = cmd->base.dest_qp_num;
	attr->qp_access_flags = cmd->base.qp_access_flags;
	attr->pkey_index = cmd->base.pkey_index;
	attr->alt_pkey_index = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd->base.min_rnr_timer;
	attr->port_num = cmd->base.port_num;
	attr->timeout = cmd->base.timeout;
	attr->retry_cnt = cmd->base.retry_cnt;
	attr->rnr_retry = cmd->base.rnr_retry;
	attr->alt_port_num = cmd->base.alt_port_num;
	attr->alt_timeout = cmd->base.alt_timeout;
	attr->rate_limit = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(UVERBS_OBJECT_QP, cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
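
/*
 * Allocate a work request plus its scatter/gather array in one chunk,
 * guarding the size computation against u32 overflow from userspace.
 */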
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;
	size_t next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
2335 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2336 ret = -EFAULT;
2338 out_put:
2339 uobj_put_obj_read(qp);
2341 while (wr) {
2342 if (is_ud && ud_wr(wr)->ah)
2343 uobj_put_obj_read(ud_wr(wr)->ah);
2344 next = wr->next;
2345 kfree(wr);
2346 wr = next;
2349 out:
2350 kfree(user_wr);
2352 return ret ? ret : in_len;
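/*
 * Editor's note: receive work requests carry no opcode-specific payload,
 * so ib_uverbs_unmarshall_recv() below is shared by the QP and SRQ
 * post-receive paths. It returns either a kmalloc'ed, NULL-terminated
 * chain of struct ib_recv_wr (each with its sg_list appended in the same
 * allocation) or an ERR_PTR; callers free the chain themselves.
 */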
2355 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2356 int in_len,
2357 u32 wr_count,
2358 u32 sge_count,
2359 u32 wqe_size)
2361 struct ib_uverbs_recv_wr *user_wr;
2362 struct ib_recv_wr *wr = NULL, *last, *next;
2363 int sg_ind;
2364 int i;
2365 int ret;
2367 if (in_len < wqe_size * wr_count +
2368 sge_count * sizeof (struct ib_uverbs_sge))
2369 return ERR_PTR(-EINVAL);
2371 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2372 return ERR_PTR(-EINVAL);
2374 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2375 if (!user_wr)
2376 return ERR_PTR(-ENOMEM);
2378 sg_ind = 0;
2379 last = NULL;
2380 for (i = 0; i < wr_count; ++i) {
2381 if (copy_from_user(user_wr, buf + i * wqe_size,
2382 wqe_size)) {
2383 ret = -EFAULT;
2384 goto err;
2387 if (user_wr->num_sge + sg_ind > sge_count) {
2388 ret = -EINVAL;
2389 goto err;
2392 if (user_wr->num_sge >=
2393 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2394 sizeof (struct ib_sge)) {
2395 ret = -EINVAL;
2396 goto err;
2399 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2400 user_wr->num_sge * sizeof (struct ib_sge),
2401 GFP_KERNEL);
2402 if (!next) {
2403 ret = -ENOMEM;
2404 goto err;
2407 if (!last)
2408 wr = next;
2409 else
2410 last->next = next;
2411 last = next;
2413 next->next = NULL;
2414 next->wr_id = user_wr->wr_id;
2415 next->num_sge = user_wr->num_sge;
2417 if (next->num_sge) {
2418 next->sg_list = (void *) next +
2419 ALIGN(sizeof *next, sizeof (struct ib_sge));
2420 if (copy_from_user(next->sg_list,
2421 buf + wr_count * wqe_size +
2422 sg_ind * sizeof (struct ib_sge),
2423 next->num_sge * sizeof (struct ib_sge))) {
2424 ret = -EFAULT;
2425 goto err;
2427 sg_ind += next->num_sge;
2428 } else
2429 next->sg_list = NULL;
2432 kfree(user_wr);
2433 return wr;
2435 err:
2436 kfree(user_wr);
2438 while (wr) {
2439 next = wr->next;
2440 kfree(wr);
2441 wr = next;
2444 return ERR_PTR(ret);
2447 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2448 struct ib_device *ib_dev,
2449 const char __user *buf, int in_len,
2450 int out_len)
2452 struct ib_uverbs_post_recv cmd;
2453 struct ib_uverbs_post_recv_resp resp;
2454 struct ib_recv_wr *wr, *next, *bad_wr;
2455 struct ib_qp *qp;
2456 ssize_t ret = -EINVAL;
2458 if (copy_from_user(&cmd, buf, sizeof cmd))
2459 return -EFAULT;
2461 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2462 in_len - sizeof cmd, cmd.wr_count,
2463 cmd.sge_count, cmd.wqe_size);
2464 if (IS_ERR(wr))
2465 return PTR_ERR(wr);
2467 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
2468 if (!qp)
2469 goto out;
2471 resp.bad_wr = 0;
2472 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2474 uobj_put_obj_read(qp);
2475 if (ret) {
2476 for (next = wr; next; next = next->next) {
2477 ++resp.bad_wr;
2478 if (next == bad_wr)
2479 break;
2483 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2484 ret = -EFAULT;
2486 out:
2487 while (wr) {
2488 next = wr->next;
2489 kfree(wr);
2490 wr = next;
2493 return ret ? ret : in_len;
2496 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2497 struct ib_device *ib_dev,
2498 const char __user *buf, int in_len,
2499 int out_len)
2501 struct ib_uverbs_post_srq_recv cmd;
2502 struct ib_uverbs_post_srq_recv_resp resp;
2503 struct ib_recv_wr *wr, *next, *bad_wr;
2504 struct ib_srq *srq;
2505 ssize_t ret = -EINVAL;
2507 if (copy_from_user(&cmd, buf, sizeof cmd))
2508 return -EFAULT;
2510 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2511 in_len - sizeof cmd, cmd.wr_count,
2512 cmd.sge_count, cmd.wqe_size);
2513 if (IS_ERR(wr))
2514 return PTR_ERR(wr);
2516 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
2517 if (!srq)
2518 goto out;
2520 resp.bad_wr = 0;
2521 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2523 uobj_put_obj_read(srq);
2525 if (ret)
2526 for (next = wr; next; next = next->next) {
2527 ++resp.bad_wr;
2528 if (next == bad_wr)
2529 break;
2532 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
2533 ret = -EFAULT;
2535 out:
2536 while (wr) {
2537 next = wr->next;
2538 kfree(wr);
2539 wr = next;
2542 return ret ? ret : in_len;
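/*
 * Editor's note: create-AH translates the flat userspace address
 * attributes into struct rdma_ah_attr through the rdma_ah_set_*()
 * helpers. The GRH fields are only meaningful when cmd.attr.is_global is
 * set, which is why the else branch clears the AH flags instead of
 * installing a GRH.
 */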
2545 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2546 struct ib_device *ib_dev,
2547 const char __user *buf, int in_len,
2548 int out_len)
2550 struct ib_uverbs_create_ah cmd;
2551 struct ib_uverbs_create_ah_resp resp;
2552 struct ib_uobject *uobj;
2553 struct ib_pd *pd;
2554 struct ib_ah *ah;
2555 struct rdma_ah_attr attr;
2556 int ret;
2557 struct ib_udata udata;
2559 if (out_len < sizeof resp)
2560 return -ENOSPC;
2562 if (copy_from_user(&cmd, buf, sizeof cmd))
2563 return -EFAULT;
2565 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
2566 return -EINVAL;
2568 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
2569 u64_to_user_ptr(cmd.response) + sizeof(resp),
2570 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
2571 out_len - sizeof(resp));
2573 uobj = uobj_alloc(UVERBS_OBJECT_AH, file->ucontext);
2574 if (IS_ERR(uobj))
2575 return PTR_ERR(uobj);
2577 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
2578 if (!pd) {
2579 ret = -EINVAL;
2580 goto err;
2583 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
2584 rdma_ah_set_make_grd(&attr, false);
2585 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2586 rdma_ah_set_sl(&attr, cmd.attr.sl);
2587 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2588 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2589 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2591 if (cmd.attr.is_global) {
2592 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2593 cmd.attr.grh.sgid_index,
2594 cmd.attr.grh.hop_limit,
2595 cmd.attr.grh.traffic_class);
2596 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
2597 } else {
2598 rdma_ah_set_ah_flags(&attr, 0);
2601 ah = rdma_create_user_ah(pd, &attr, &udata);
2602 if (IS_ERR(ah)) {
2603 ret = PTR_ERR(ah);
2604 goto err_put;
2607 ah->uobject = uobj;
2608 uobj->user_handle = cmd.user_handle;
2609 uobj->object = ah;
2611 resp.ah_handle = uobj->id;
2613 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
2614 ret = -EFAULT;
2615 goto err_copy;
2618 uobj_put_obj_read(pd);
2619 uobj_alloc_commit(uobj);
2621 return in_len;
2623 err_copy:
2624 rdma_destroy_ah(ah);
2626 err_put:
2627 uobj_put_obj_read(pd);
2629 err:
2630 uobj_alloc_abort(uobj);
2631 return ret;
2634 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2635 struct ib_device *ib_dev,
2636 const char __user *buf, int in_len, int out_len)
2638 struct ib_uverbs_destroy_ah cmd;
2639 struct ib_uobject *uobj;
2640 int ret;
2642 if (copy_from_user(&cmd, buf, sizeof cmd))
2643 return -EFAULT;
2645 uobj = uobj_get_write(UVERBS_OBJECT_AH, cmd.ah_handle,
2646 file->ucontext);
2647 if (IS_ERR(uobj))
2648 return PTR_ERR(uobj);
2650 ret = uobj_remove_commit(uobj);
2651 return ret ?: in_len;
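/*
 * Editor's note: each user QP keeps its own mcast_list so that attach is
 * idempotent per (gid, mlid) pair and detach can find the entry to free.
 * Both handlers below walk the list under obj->mcast_lock before touching
 * the device with ib_attach_mcast()/ib_detach_mcast().
 */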
2654 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2655 struct ib_device *ib_dev,
2656 const char __user *buf, int in_len,
2657 int out_len)
2659 struct ib_uverbs_attach_mcast cmd;
2660 struct ib_qp *qp;
2661 struct ib_uqp_object *obj;
2662 struct ib_uverbs_mcast_entry *mcast;
2663 int ret;
2665 if (copy_from_user(&cmd, buf, sizeof cmd))
2666 return -EFAULT;
2668 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
2669 if (!qp)
2670 return -EINVAL;
2672 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2674 mutex_lock(&obj->mcast_lock);
2675 list_for_each_entry(mcast, &obj->mcast_list, list)
2676 if (cmd.mlid == mcast->lid &&
2677 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2678 ret = 0;
2679 goto out_put;
2682 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2683 if (!mcast) {
2684 ret = -ENOMEM;
2685 goto out_put;
2688 mcast->lid = cmd.mlid;
2689 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2691 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2692 if (!ret)
2693 list_add_tail(&mcast->list, &obj->mcast_list);
2694 else
2695 kfree(mcast);
2697 out_put:
2698 mutex_unlock(&obj->mcast_lock);
2699 uobj_put_obj_read(qp);
2701 return ret ? ret : in_len;
2704 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2705 struct ib_device *ib_dev,
2706 const char __user *buf, int in_len,
2707 int out_len)
2709 struct ib_uverbs_detach_mcast cmd;
2710 struct ib_uqp_object *obj;
2711 struct ib_qp *qp;
2712 struct ib_uverbs_mcast_entry *mcast;
2713 int ret = -EINVAL;
2714 bool found = false;
2716 if (copy_from_user(&cmd, buf, sizeof cmd))
2717 return -EFAULT;
2719 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
2720 if (!qp)
2721 return -EINVAL;
2723 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2724 mutex_lock(&obj->mcast_lock);
2726 list_for_each_entry(mcast, &obj->mcast_list, list)
2727 if (cmd.mlid == mcast->lid &&
2728 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2729 list_del(&mcast->list);
2730 kfree(mcast);
2731 found = true;
2732 break;
2735 if (!found) {
2736 ret = -EINVAL;
2737 goto out_put;
2740 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2742 out_put:
2743 mutex_unlock(&obj->mcast_lock);
2744 uobj_put_obj_read(qp);
2745 return ret ? ret : in_len;
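/*
 * Editor's note: a flow steering rule may reference flow-action and
 * counters objects. ib_uflow_resources records those references (one
 * usecnt each) so ib_uverbs_flow_resources_free() can drop them when the
 * flow is destroyed; both arrays are sized for the worst case of
 * num_specs entries.
 */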
2748 struct ib_uflow_resources {
2749 size_t max;
2750 size_t num;
2751 size_t collection_num;
2752 size_t counters_num;
2753 struct ib_counters **counters;
2754 struct ib_flow_action **collection;
2757 static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
2759 struct ib_uflow_resources *resources;
2761 resources = kzalloc(sizeof(*resources), GFP_KERNEL);
2763 if (!resources)
2764 goto err_res;
2766 resources->counters =
2767 kcalloc(num_specs, sizeof(*resources->counters), GFP_KERNEL);
2769 if (!resources->counters)
2770 goto err_cnt;
2772 resources->collection =
2773 kcalloc(num_specs, sizeof(*resources->collection), GFP_KERNEL);
2775 if (!resources->collection)
2776 goto err_collection;
2778 resources->max = num_specs;
2780 return resources;
2782 err_collection:
2783 kfree(resources->counters);
2784 err_cnt:
2785 kfree(resources);
2786 err_res:
2787 return NULL;
2790 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
2792 unsigned int i;
2794 for (i = 0; i < uflow_res->collection_num; i++)
2795 atomic_dec(&uflow_res->collection[i]->usecnt);
2797 for (i = 0; i < uflow_res->counters_num; i++)
2798 atomic_dec(&uflow_res->counters[i]->usecnt);
2800 kfree(uflow_res->collection);
2801 kfree(uflow_res->counters);
2802 kfree(uflow_res);
2805 static void flow_resources_add(struct ib_uflow_resources *uflow_res,
2806 enum ib_flow_spec_type type,
2807 void *ibobj)
2809 WARN_ON(uflow_res->num >= uflow_res->max);
2811 switch (type) {
2812 case IB_FLOW_SPEC_ACTION_HANDLE:
2813 atomic_inc(&((struct ib_flow_action *)ibobj)->usecnt);
2814 uflow_res->collection[uflow_res->collection_num++] =
2815 (struct ib_flow_action *)ibobj;
2816 break;
2817 case IB_FLOW_SPEC_ACTION_COUNT:
2818 atomic_inc(&((struct ib_counters *)ibobj)->usecnt);
2819 uflow_res->counters[uflow_res->counters_num++] =
2820 (struct ib_counters *)ibobj;
2821 break;
2822 default:
2823 WARN_ON(1);
2826 uflow_res->num++;
2829 static int kern_spec_to_ib_spec_action(struct ib_ucontext *ucontext,
2830 struct ib_uverbs_flow_spec *kern_spec,
2831 union ib_flow_spec *ib_spec,
2832 struct ib_uflow_resources *uflow_res)
2834 ib_spec->type = kern_spec->type;
2835 switch (ib_spec->type) {
2836 case IB_FLOW_SPEC_ACTION_TAG:
2837 if (kern_spec->flow_tag.size !=
2838 sizeof(struct ib_uverbs_flow_spec_action_tag))
2839 return -EINVAL;
2841 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2842 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2843 break;
2844 case IB_FLOW_SPEC_ACTION_DROP:
2845 if (kern_spec->drop.size !=
2846 sizeof(struct ib_uverbs_flow_spec_action_drop))
2847 return -EINVAL;
2849 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2850 break;
2851 case IB_FLOW_SPEC_ACTION_HANDLE:
2852 if (kern_spec->action.size !=
2853 sizeof(struct ib_uverbs_flow_spec_action_handle))
2854 return -EOPNOTSUPP;
2855 ib_spec->action.act = uobj_get_obj_read(flow_action,
2856 UVERBS_OBJECT_FLOW_ACTION,
2857 kern_spec->action.handle,
2858 ucontext);
2859 if (!ib_spec->action.act)
2860 return -EINVAL;
2861 ib_spec->action.size =
2862 sizeof(struct ib_flow_spec_action_handle);
2863 flow_resources_add(uflow_res,
2864 IB_FLOW_SPEC_ACTION_HANDLE,
2865 ib_spec->action.act);
2866 uobj_put_obj_read(ib_spec->action.act);
2867 break;
2868 case IB_FLOW_SPEC_ACTION_COUNT:
2869 if (kern_spec->flow_count.size !=
2870 sizeof(struct ib_uverbs_flow_spec_action_count))
2871 return -EINVAL;
2872 ib_spec->flow_count.counters =
2873 uobj_get_obj_read(counters,
2874 UVERBS_OBJECT_COUNTERS,
2875 kern_spec->flow_count.handle,
2876 ucontext);
2877 if (!ib_spec->flow_count.counters)
2878 return -EINVAL;
2879 ib_spec->flow_count.size =
2880 sizeof(struct ib_flow_spec_action_count);
2881 flow_resources_add(uflow_res,
2882 IB_FLOW_SPEC_ACTION_COUNT,
2883 ib_spec->flow_count.counters);
2884 uobj_put_obj_read(ib_spec->flow_count.counters);
2885 break;
2886 default:
2887 return -EINVAL;
2889 return 0;
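/*
 * Editor's note: a userspace filter spec carries its header followed by
 * two equally sized blobs, the match value and then the mask. That is why
 * kern_spec_filter_sz() below divides the non-header size by two, and why
 * kern_spec_to_ib_spec_filter() later finds the mask at
 * kern_spec_val + kern_filter_sz.
 */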
2892 static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
2894 /* Returns the user space filter size, including padding */
2895 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2898 static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
2899 u16 ib_real_filter_sz)
2901 /*
2902 * User space filter structures must be 64 bit aligned, otherwise this
2903 * may pass, but we won't handle additional new attributes.
2904 */
2906 if (kern_filter_size > ib_real_filter_sz) {
2907 if (memchr_inv(kern_spec_filter +
2908 ib_real_filter_sz, 0,
2909 kern_filter_size - ib_real_filter_sz))
2910 return -EINVAL;
2911 return ib_real_filter_sz;
2913 return kern_filter_size;
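/*
 * Editor's note: each kernel filter struct ends with a real_sz marker, so
 * offsetof(..., real_sz) is the size the kernel understands.
 * spec_filter_size() above accepts a larger userspace filter only if
 * every byte past that point is zero (memchr_inv), which lets an older
 * kernel run newer userspace safely: unknown trailing fields must be
 * unused.
 */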
2916 int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
2917 const void *kern_spec_mask,
2918 const void *kern_spec_val,
2919 size_t kern_filter_sz,
2920 union ib_flow_spec *ib_spec)
2922 ssize_t actual_filter_sz;
2923 ssize_t ib_filter_sz;
2925 /* User flow spec size must be aligned to 4 bytes */
2926 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2927 return -EINVAL;
2929 ib_spec->type = type;
2931 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2932 return -EINVAL;
2934 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2935 case IB_FLOW_SPEC_ETH:
2936 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2937 actual_filter_sz = spec_filter_size(kern_spec_mask,
2938 kern_filter_sz,
2939 ib_filter_sz);
2940 if (actual_filter_sz <= 0)
2941 return -EINVAL;
2942 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2943 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2944 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2945 break;
2946 case IB_FLOW_SPEC_IPV4:
2947 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2948 actual_filter_sz = spec_filter_size(kern_spec_mask,
2949 kern_filter_sz,
2950 ib_filter_sz);
2951 if (actual_filter_sz <= 0)
2952 return -EINVAL;
2953 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2954 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2955 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2956 break;
2957 case IB_FLOW_SPEC_IPV6:
2958 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2959 actual_filter_sz = spec_filter_size(kern_spec_mask,
2960 kern_filter_sz,
2961 ib_filter_sz);
2962 if (actual_filter_sz <= 0)
2963 return -EINVAL;
2964 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2965 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2966 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2968 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2969 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2970 return -EINVAL;
2971 break;
2972 case IB_FLOW_SPEC_TCP:
2973 case IB_FLOW_SPEC_UDP:
2974 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2975 actual_filter_sz = spec_filter_size(kern_spec_mask,
2976 kern_filter_sz,
2977 ib_filter_sz);
2978 if (actual_filter_sz <= 0)
2979 return -EINVAL;
2980 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2981 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2982 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2983 break;
2984 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2985 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2986 actual_filter_sz = spec_filter_size(kern_spec_mask,
2987 kern_filter_sz,
2988 ib_filter_sz);
2989 if (actual_filter_sz <= 0)
2990 return -EINVAL;
2991 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2992 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2993 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2995 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2996 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
2997 return -EINVAL;
2998 break;
2999 case IB_FLOW_SPEC_ESP:
3000 ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
3001 actual_filter_sz = spec_filter_size(kern_spec_mask,
3002 kern_filter_sz,
3003 ib_filter_sz);
3004 if (actual_filter_sz <= 0)
3005 return -EINVAL;
3006 ib_spec->esp.size = sizeof(struct ib_flow_spec_esp);
3007 memcpy(&ib_spec->esp.val, kern_spec_val, actual_filter_sz);
3008 memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
3009 break;
3010 case IB_FLOW_SPEC_GRE:
3011 ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
3012 actual_filter_sz = spec_filter_size(kern_spec_mask,
3013 kern_filter_sz,
3014 ib_filter_sz);
3015 if (actual_filter_sz <= 0)
3016 return -EINVAL;
3017 ib_spec->gre.size = sizeof(struct ib_flow_spec_gre);
3018 memcpy(&ib_spec->gre.val, kern_spec_val, actual_filter_sz);
3019 memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
3020 break;
3021 case IB_FLOW_SPEC_MPLS:
3022 ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
3023 actual_filter_sz = spec_filter_size(kern_spec_mask,
3024 kern_filter_sz,
3025 ib_filter_sz);
3026 if (actual_filter_sz <= 0)
3027 return -EINVAL;
3028 ib_spec->mpls.size = sizeof(struct ib_flow_spec_mpls);
3029 memcpy(&ib_spec->mpls.val, kern_spec_val, actual_filter_sz);
3030 memcpy(&ib_spec->mpls.mask, kern_spec_mask, actual_filter_sz);
3031 break;
3032 default:
3033 return -EINVAL;
3035 return 0;
3038 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
3039 union ib_flow_spec *ib_spec)
3041 ssize_t kern_filter_sz;
3042 void *kern_spec_mask;
3043 void *kern_spec_val;
3045 if (kern_spec->reserved)
3046 return -EINVAL;
3048 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
3050 kern_spec_val = (void *)kern_spec +
3051 sizeof(struct ib_uverbs_flow_spec_hdr);
3052 kern_spec_mask = kern_spec_val + kern_filter_sz;
3054 return ib_uverbs_kern_spec_to_ib_spec_filter(kern_spec->type,
3055 kern_spec_mask,
3056 kern_spec_val,
3057 kern_filter_sz, ib_spec);
3060 static int kern_spec_to_ib_spec(struct ib_ucontext *ucontext,
3061 struct ib_uverbs_flow_spec *kern_spec,
3062 union ib_flow_spec *ib_spec,
3063 struct ib_uflow_resources *uflow_res)
3065 if (kern_spec->reserved)
3066 return -EINVAL;
3068 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
3069 return kern_spec_to_ib_spec_action(ucontext, kern_spec, ib_spec,
3070 uflow_res);
3071 else
3072 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
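/*
 * Editor's note: the extended (ex_) handlers below all follow the same
 * extensible-ABI pattern: compute the minimum command/response size with
 * offsetof() up to the last mandatory field, reject short inputs, require
 * any bytes beyond the kernel's struct to be zeroed (ib_is_udata_cleared),
 * and report how much of the response was actually filled in through
 * resp.response_length. A minimal sketch of the pattern, assuming a
 * hypothetical cmd whose last mandatory field is 'last':
 *
 *     required_cmd_sz = offsetof(typeof(cmd), last) + sizeof(cmd.last);
 *     if (ucore->inlen < required_cmd_sz)
 *             return -EINVAL;
 *     if (ucore->inlen > sizeof(cmd) &&
 *         !ib_is_udata_cleared(ucore, sizeof(cmd),
 *                              ucore->inlen - sizeof(cmd)))
 *             return -EOPNOTSUPP;
 */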
3075 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
3076 struct ib_device *ib_dev,
3077 struct ib_udata *ucore,
3078 struct ib_udata *uhw)
3080 struct ib_uverbs_ex_create_wq cmd = {};
3081 struct ib_uverbs_ex_create_wq_resp resp = {};
3082 struct ib_uwq_object *obj;
3083 int err = 0;
3084 struct ib_cq *cq;
3085 struct ib_pd *pd;
3086 struct ib_wq *wq;
3087 struct ib_wq_init_attr wq_init_attr = {};
3088 size_t required_cmd_sz;
3089 size_t required_resp_len;
3091 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
3092 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
3094 if (ucore->inlen < required_cmd_sz)
3095 return -EINVAL;
3097 if (ucore->outlen < required_resp_len)
3098 return -ENOSPC;
3100 if (ucore->inlen > sizeof(cmd) &&
3101 !ib_is_udata_cleared(ucore, sizeof(cmd),
3102 ucore->inlen - sizeof(cmd)))
3103 return -EOPNOTSUPP;
3105 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3106 if (err)
3107 return err;
3109 if (cmd.comp_mask)
3110 return -EOPNOTSUPP;
3112 obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ,
3113 file->ucontext);
3114 if (IS_ERR(obj))
3115 return PTR_ERR(obj);
3117 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file->ucontext);
3118 if (!pd) {
3119 err = -EINVAL;
3120 goto err_uobj;
3123 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
3124 if (!cq) {
3125 err = -EINVAL;
3126 goto err_put_pd;
3129 wq_init_attr.cq = cq;
3130 wq_init_attr.max_sge = cmd.max_sge;
3131 wq_init_attr.max_wr = cmd.max_wr;
3132 wq_init_attr.wq_context = file;
3133 wq_init_attr.wq_type = cmd.wq_type;
3134 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
3135 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
3136 sizeof(cmd.create_flags)))
3137 wq_init_attr.create_flags = cmd.create_flags;
3138 obj->uevent.events_reported = 0;
3139 INIT_LIST_HEAD(&obj->uevent.event_list);
3141 if (!pd->device->create_wq) {
3142 err = -EOPNOTSUPP;
3143 goto err_put_cq;
3145 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
3146 if (IS_ERR(wq)) {
3147 err = PTR_ERR(wq);
3148 goto err_put_cq;
3151 wq->uobject = &obj->uevent.uobject;
3152 obj->uevent.uobject.object = wq;
3153 wq->wq_type = wq_init_attr.wq_type;
3154 wq->cq = cq;
3155 wq->pd = pd;
3156 wq->device = pd->device;
3157 wq->wq_context = wq_init_attr.wq_context;
3158 atomic_set(&wq->usecnt, 0);
3159 atomic_inc(&pd->usecnt);
3160 atomic_inc(&cq->usecnt);
3164 memset(&resp, 0, sizeof(resp));
3165 resp.wq_handle = obj->uevent.uobject.id;
3166 resp.max_sge = wq_init_attr.max_sge;
3167 resp.max_wr = wq_init_attr.max_wr;
3168 resp.wqn = wq->wq_num;
3169 resp.response_length = required_resp_len;
3170 err = ib_copy_to_udata(ucore,
3171 &resp, resp.response_length);
3172 if (err)
3173 goto err_copy;
3175 uobj_put_obj_read(pd);
3176 uobj_put_obj_read(cq);
3177 uobj_alloc_commit(&obj->uevent.uobject);
3178 return 0;
3180 err_copy:
3181 ib_destroy_wq(wq);
3182 err_put_cq:
3183 uobj_put_obj_read(cq);
3184 err_put_pd:
3185 uobj_put_obj_read(pd);
3186 err_uobj:
3187 uobj_alloc_abort(&obj->uevent.uobject);
3189 return err;
3192 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
3193 struct ib_device *ib_dev,
3194 struct ib_udata *ucore,
3195 struct ib_udata *uhw)
3197 struct ib_uverbs_ex_destroy_wq cmd = {};
3198 struct ib_uverbs_ex_destroy_wq_resp resp = {};
3199 struct ib_uobject *uobj;
3200 struct ib_uwq_object *obj;
3201 size_t required_cmd_sz;
3202 size_t required_resp_len;
3203 int ret;
3205 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
3206 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3208 if (ucore->inlen < required_cmd_sz)
3209 return -EINVAL;
3211 if (ucore->outlen < required_resp_len)
3212 return -ENOSPC;
3214 if (ucore->inlen > sizeof(cmd) &&
3215 !ib_is_udata_cleared(ucore, sizeof(cmd),
3216 ucore->inlen - sizeof(cmd)))
3217 return -EOPNOTSUPP;
3219 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3220 if (ret)
3221 return ret;
3223 if (cmd.comp_mask)
3224 return -EOPNOTSUPP;
3226 resp.response_length = required_resp_len;
3227 uobj = uobj_get_write(UVERBS_OBJECT_WQ, cmd.wq_handle,
3228 file->ucontext);
3229 if (IS_ERR(uobj))
3230 return PTR_ERR(uobj);
3232 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
3233 /*
3234 * Make sure we don't free the memory in remove_commit as we still
3235 * need the uobject memory to create the response.
3236 */
3237 uverbs_uobject_get(uobj);
3239 ret = uobj_remove_commit(uobj);
3240 resp.events_reported = obj->uevent.events_reported;
3241 uverbs_uobject_put(uobj);
3242 if (ret)
3243 return ret;
3245 return ib_copy_to_udata(ucore, &resp, resp.response_length);
3248 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3249 struct ib_device *ib_dev,
3250 struct ib_udata *ucore,
3251 struct ib_udata *uhw)
3253 struct ib_uverbs_ex_modify_wq cmd = {};
3254 struct ib_wq *wq;
3255 struct ib_wq_attr wq_attr = {};
3256 size_t required_cmd_sz;
3257 int ret;
3259 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3260 if (ucore->inlen < required_cmd_sz)
3261 return -EINVAL;
3263 if (ucore->inlen > sizeof(cmd) &&
3264 !ib_is_udata_cleared(ucore, sizeof(cmd),
3265 ucore->inlen - sizeof(cmd)))
3266 return -EOPNOTSUPP;
3268 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3269 if (ret)
3270 return ret;
3272 if (!cmd.attr_mask)
3273 return -EINVAL;
3275 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3276 return -EINVAL;
3278 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file->ucontext);
3279 if (!wq)
3280 return -EINVAL;
3282 wq_attr.curr_wq_state = cmd.curr_wq_state;
3283 wq_attr.wq_state = cmd.wq_state;
3284 if (cmd.attr_mask & IB_WQ_FLAGS) {
3285 wq_attr.flags = cmd.flags;
3286 wq_attr.flags_mask = cmd.flags_mask;
3288 if (!wq->device->modify_wq) {
3289 ret = -EOPNOTSUPP;
3290 goto out;
3292 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
3293 out:
3294 uobj_put_obj_read(wq);
3295 return ret;
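/*
 * Editor's note: an RSS indirection table always has a power-of-two
 * number of entries, so userspace passes log_ind_tbl_size and the kernel
 * reads 2^log_ind_tbl_size WQ handles. The handle array on the wire is
 * u64 aligned, hence the extra sizeof(__u32) of expected input when there
 * is exactly one (odd) handle.
 */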
3298 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3299 struct ib_device *ib_dev,
3300 struct ib_udata *ucore,
3301 struct ib_udata *uhw)
3303 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3304 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3305 struct ib_uobject *uobj;
3306 int err = 0;
3307 struct ib_rwq_ind_table_init_attr init_attr = {};
3308 struct ib_rwq_ind_table *rwq_ind_tbl;
3309 struct ib_wq **wqs = NULL;
3310 u32 *wqs_handles = NULL;
3311 struct ib_wq *wq = NULL;
3312 int i, j, num_read_wqs;
3313 u32 num_wq_handles;
3314 u32 expected_in_size;
3315 size_t required_cmd_sz_header;
3316 size_t required_resp_len;
3318 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3319 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3321 if (ucore->inlen < required_cmd_sz_header)
3322 return -EINVAL;
3324 if (ucore->outlen < required_resp_len)
3325 return -ENOSPC;
3327 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3328 if (err)
3329 return err;
3331 ucore->inbuf += required_cmd_sz_header;
3332 ucore->inlen -= required_cmd_sz_header;
3334 if (cmd.comp_mask)
3335 return -EOPNOTSUPP;
3337 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3338 return -EINVAL;
3340 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3341 expected_in_size = num_wq_handles * sizeof(__u32);
3342 if (num_wq_handles == 1)
3343 /* input size for wq handles is u64 aligned */
3344 expected_in_size += sizeof(__u32);
3346 if (ucore->inlen < expected_in_size)
3347 return -EINVAL;
3349 if (ucore->inlen > expected_in_size &&
3350 !ib_is_udata_cleared(ucore, expected_in_size,
3351 ucore->inlen - expected_in_size))
3352 return -EOPNOTSUPP;
3354 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3355 GFP_KERNEL);
3356 if (!wqs_handles)
3357 return -ENOMEM;
3359 err = ib_copy_from_udata(wqs_handles, ucore,
3360 num_wq_handles * sizeof(__u32));
3361 if (err)
3362 goto err_free;
3364 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3365 if (!wqs) {
3366 err = -ENOMEM;
3367 goto err_free;
3370 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3371 num_read_wqs++) {
3372 wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs],
3373 file->ucontext);
3374 if (!wq) {
3375 err = -EINVAL;
3376 goto put_wqs;
3379 wqs[num_read_wqs] = wq;
3382 uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file->ucontext);
3383 if (IS_ERR(uobj)) {
3384 err = PTR_ERR(uobj);
3385 goto put_wqs;
3388 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3389 init_attr.ind_tbl = wqs;
3391 if (!ib_dev->create_rwq_ind_table) {
3392 err = -EOPNOTSUPP;
3393 goto err_uobj;
3395 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3397 if (IS_ERR(rwq_ind_tbl)) {
3398 err = PTR_ERR(rwq_ind_tbl);
3399 goto err_uobj;
3402 rwq_ind_tbl->ind_tbl = wqs;
3403 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3404 rwq_ind_tbl->uobject = uobj;
3405 uobj->object = rwq_ind_tbl;
3406 rwq_ind_tbl->device = ib_dev;
3407 atomic_set(&rwq_ind_tbl->usecnt, 0);
3409 for (i = 0; i < num_wq_handles; i++)
3410 atomic_inc(&wqs[i]->usecnt);
3412 resp.ind_tbl_handle = uobj->id;
3413 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3414 resp.response_length = required_resp_len;
3416 err = ib_copy_to_udata(ucore,
3417 &resp, resp.response_length);
3418 if (err)
3419 goto err_copy;
3421 kfree(wqs_handles);
3423 for (j = 0; j < num_read_wqs; j++)
3424 uobj_put_obj_read(wqs[j]);
3426 uobj_alloc_commit(uobj);
3427 return 0;
3429 err_copy:
3430 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3431 err_uobj:
3432 uobj_alloc_abort(uobj);
3433 put_wqs:
3434 for (j = 0; j < num_read_wqs; j++)
3435 uobj_put_obj_read(wqs[j]);
3436 err_free:
3437 kfree(wqs_handles);
3438 kfree(wqs);
3439 return err;
3442 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3443 struct ib_device *ib_dev,
3444 struct ib_udata *ucore,
3445 struct ib_udata *uhw)
3447 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
3448 struct ib_uobject *uobj;
3449 int ret;
3450 size_t required_cmd_sz;
3452 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3454 if (ucore->inlen < required_cmd_sz)
3455 return -EINVAL;
3457 if (ucore->inlen > sizeof(cmd) &&
3458 !ib_is_udata_cleared(ucore, sizeof(cmd),
3459 ucore->inlen - sizeof(cmd)))
3460 return -EOPNOTSUPP;
3462 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3463 if (ret)
3464 return ret;
3466 if (cmd.comp_mask)
3467 return -EOPNOTSUPP;
3469 uobj = uobj_get_write(UVERBS_OBJECT_RWQ_IND_TBL, cmd.ind_tbl_handle,
3470 file->ucontext);
3471 if (IS_ERR(uobj))
3472 return PTR_ERR(uobj);
3474 return uobj_remove_commit(uobj);
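/*
 * Editor's note: a create-flow command carries struct ib_uverbs_flow_attr
 * followed by num_of_specs variable-size specs. The parsing loop below
 * walks them by each spec's self-declared size, converting one at a time
 * with kern_spec_to_ib_spec() and requiring that the declared sizes
 * consume cmd.flow_attr.size exactly; leftover bytes or a short spec
 * count fail the request.
 */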
3477 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3478 struct ib_device *ib_dev,
3479 struct ib_udata *ucore,
3480 struct ib_udata *uhw)
3482 struct ib_uverbs_create_flow cmd;
3483 struct ib_uverbs_create_flow_resp resp;
3484 struct ib_uobject *uobj;
3485 struct ib_uflow_object *uflow;
3486 struct ib_flow *flow_id;
3487 struct ib_uverbs_flow_attr *kern_flow_attr;
3488 struct ib_flow_attr *flow_attr;
3489 struct ib_qp *qp;
3490 struct ib_uflow_resources *uflow_res;
3491 int err = 0;
3492 void *kern_spec;
3493 void *ib_spec;
3494 int i;
3496 if (ucore->inlen < sizeof(cmd))
3497 return -EINVAL;
3499 if (ucore->outlen < sizeof(resp))
3500 return -ENOSPC;
3502 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3503 if (err)
3504 return err;
3506 ucore->inbuf += sizeof(cmd);
3507 ucore->inlen -= sizeof(cmd);
3509 if (cmd.comp_mask)
3510 return -EINVAL;
3512 if (!capable(CAP_NET_RAW))
3513 return -EPERM;
3515 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3516 return -EINVAL;
3518 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3519 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3520 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3521 return -EINVAL;
3523 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3524 return -EINVAL;
3526 if (cmd.flow_attr.size > ucore->inlen ||
3527 cmd.flow_attr.size >
3528 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3529 return -EINVAL;
3531 if (cmd.flow_attr.reserved[0] ||
3532 cmd.flow_attr.reserved[1])
3533 return -EINVAL;
3535 if (cmd.flow_attr.num_of_specs) {
3536 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3537 GFP_KERNEL);
3538 if (!kern_flow_attr)
3539 return -ENOMEM;
3541 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
3542 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3543 cmd.flow_attr.size);
3544 if (err)
3545 goto err_free_attr;
3546 } else {
3547 kern_flow_attr = &cmd.flow_attr;
3550 uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file->ucontext);
3551 if (IS_ERR(uobj)) {
3552 err = PTR_ERR(uobj);
3553 goto err_free_attr;
3556 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file->ucontext);
3557 if (!qp) {
3558 err = -EINVAL;
3559 goto err_uobj;
3562 flow_attr = kzalloc(struct_size(flow_attr, flows,
3563 cmd.flow_attr.num_of_specs), GFP_KERNEL);
3564 if (!flow_attr) {
3565 err = -ENOMEM;
3566 goto err_put;
3568 uflow_res = flow_resources_alloc(cmd.flow_attr.num_of_specs);
3569 if (!uflow_res) {
3570 err = -ENOMEM;
3571 goto err_free_flow_attr;
3574 flow_attr->type = kern_flow_attr->type;
3575 flow_attr->priority = kern_flow_attr->priority;
3576 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3577 flow_attr->port = kern_flow_attr->port;
3578 flow_attr->flags = kern_flow_attr->flags;
3579 flow_attr->size = sizeof(*flow_attr);
3581 kern_spec = kern_flow_attr + 1;
3582 ib_spec = flow_attr + 1;
3583 for (i = 0; i < flow_attr->num_of_specs &&
3584 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3585 cmd.flow_attr.size >=
3586 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3587 err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
3588 uflow_res);
3589 if (err)
3590 goto err_free;
3592 flow_attr->size +=
3593 ((union ib_flow_spec *) ib_spec)->size;
3594 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3595 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3596 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3598 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3599 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3600 i, cmd.flow_attr.size);
3601 err = -EINVAL;
3602 goto err_free;
3605 flow_id = qp->device->create_flow(qp, flow_attr,
3606 IB_FLOW_DOMAIN_USER, uhw);
3608 if (IS_ERR(flow_id)) {
3609 err = PTR_ERR(flow_id);
3610 goto err_free;
3612 atomic_inc(&qp->usecnt);
3613 flow_id->qp = qp;
3614 flow_id->uobject = uobj;
3615 uobj->object = flow_id;
3616 uflow = container_of(uobj, typeof(*uflow), uobject);
3617 uflow->resources = uflow_res;
3619 memset(&resp, 0, sizeof(resp));
3620 resp.flow_handle = uobj->id;
3622 err = ib_copy_to_udata(ucore,
3623 &resp, sizeof(resp));
3624 if (err)
3625 goto err_copy;
3627 uobj_put_obj_read(qp);
3628 uobj_alloc_commit(uobj);
3629 kfree(flow_attr);
3630 if (cmd.flow_attr.num_of_specs)
3631 kfree(kern_flow_attr);
3632 return 0;
3633 err_copy:
3634 ib_destroy_flow(flow_id);
3635 err_free:
3636 ib_uverbs_flow_resources_free(uflow_res);
3637 err_free_flow_attr:
3638 kfree(flow_attr);
3639 err_put:
3640 uobj_put_obj_read(qp);
3641 err_uobj:
3642 uobj_alloc_abort(uobj);
3643 err_free_attr:
3644 if (cmd.flow_attr.num_of_specs)
3645 kfree(kern_flow_attr);
3646 return err;
3649 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3650 struct ib_device *ib_dev,
3651 struct ib_udata *ucore,
3652 struct ib_udata *uhw)
3654 struct ib_uverbs_destroy_flow cmd;
3655 struct ib_uobject *uobj;
3656 int ret;
3658 if (ucore->inlen < sizeof(cmd))
3659 return -EINVAL;
3661 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3662 if (ret)
3663 return ret;
3665 if (cmd.comp_mask)
3666 return -EINVAL;
3668 uobj = uobj_get_write(UVERBS_OBJECT_FLOW, cmd.flow_handle,
3669 file->ucontext);
3670 if (IS_ERR(uobj))
3671 return PTR_ERR(uobj);
3673 ret = uobj_remove_commit(uobj);
3674 return ret;
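/*
 * Editor's note: __uverbs_create_xsrq() is the shared backend for both
 * SRQ commands: ib_uverbs_create_srq() widens the legacy request into a
 * struct ib_uverbs_create_xsrq with srq_type = IB_SRQT_BASIC, while
 * ib_uverbs_create_xsrq() passes the extended request (XRC or tag
 * matching, which additionally reference an XRCD and/or a CQ) through
 * directly.
 */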
3677 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3678 struct ib_device *ib_dev,
3679 struct ib_uverbs_create_xsrq *cmd,
3680 struct ib_udata *udata)
3682 struct ib_uverbs_create_srq_resp resp;
3683 struct ib_usrq_object *obj;
3684 struct ib_pd *pd;
3685 struct ib_srq *srq;
3686 struct ib_uobject *uninitialized_var(xrcd_uobj);
3687 struct ib_srq_init_attr attr;
3688 int ret;
3690 obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ,
3691 file->ucontext);
3692 if (IS_ERR(obj))
3693 return PTR_ERR(obj);
3695 if (cmd->srq_type == IB_SRQT_TM)
3696 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3698 if (cmd->srq_type == IB_SRQT_XRC) {
3699 xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle,
3700 file->ucontext);
3701 if (IS_ERR(xrcd_uobj)) {
3702 ret = -EINVAL;
3703 goto err;
3706 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3707 if (!attr.ext.xrc.xrcd) {
3708 ret = -EINVAL;
3709 goto err_put_xrcd;
3712 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3713 atomic_inc(&obj->uxrcd->refcnt);
3716 if (ib_srq_has_cq(cmd->srq_type)) {
3717 attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle,
3718 file->ucontext);
3719 if (!attr.ext.cq) {
3720 ret = -EINVAL;
3721 goto err_put_xrcd;
3725 pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file->ucontext);
3726 if (!pd) {
3727 ret = -EINVAL;
3728 goto err_put_cq;
3731 attr.event_handler = ib_uverbs_srq_event_handler;
3732 attr.srq_context = file;
3733 attr.srq_type = cmd->srq_type;
3734 attr.attr.max_wr = cmd->max_wr;
3735 attr.attr.max_sge = cmd->max_sge;
3736 attr.attr.srq_limit = cmd->srq_limit;
3738 obj->uevent.events_reported = 0;
3739 INIT_LIST_HEAD(&obj->uevent.event_list);
3741 srq = pd->device->create_srq(pd, &attr, udata);
3742 if (IS_ERR(srq)) {
3743 ret = PTR_ERR(srq);
3744 goto err_put;
3747 srq->device = pd->device;
3748 srq->pd = pd;
3749 srq->srq_type = cmd->srq_type;
3750 srq->uobject = &obj->uevent.uobject;
3751 srq->event_handler = attr.event_handler;
3752 srq->srq_context = attr.srq_context;
3754 if (ib_srq_has_cq(cmd->srq_type)) {
3755 srq->ext.cq = attr.ext.cq;
3756 atomic_inc(&attr.ext.cq->usecnt);
3759 if (cmd->srq_type == IB_SRQT_XRC) {
3760 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3761 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3764 atomic_inc(&pd->usecnt);
3765 atomic_set(&srq->usecnt, 0);
3767 obj->uevent.uobject.object = srq;
3768 obj->uevent.uobject.user_handle = cmd->user_handle;
3770 memset(&resp, 0, sizeof resp);
3771 resp.srq_handle = obj->uevent.uobject.id;
3772 resp.max_wr = attr.attr.max_wr;
3773 resp.max_sge = attr.attr.max_sge;
3774 if (cmd->srq_type == IB_SRQT_XRC)
3775 resp.srqn = srq->ext.xrc.srq_num;
3777 if (copy_to_user(u64_to_user_ptr(cmd->response),
3778 &resp, sizeof resp)) {
3779 ret = -EFAULT;
3780 goto err_copy;
3783 if (cmd->srq_type == IB_SRQT_XRC)
3784 uobj_put_read(xrcd_uobj);
3786 if (ib_srq_has_cq(cmd->srq_type))
3787 uobj_put_obj_read(attr.ext.cq);
3789 uobj_put_obj_read(pd);
3790 uobj_alloc_commit(&obj->uevent.uobject);
3792 return 0;
3794 err_copy:
3795 ib_destroy_srq(srq);
3797 err_put:
3798 uobj_put_obj_read(pd);
3800 err_put_cq:
3801 if (ib_srq_has_cq(cmd->srq_type))
3802 uobj_put_obj_read(attr.ext.cq);
3804 err_put_xrcd:
3805 if (cmd->srq_type == IB_SRQT_XRC) {
3806 atomic_dec(&obj->uxrcd->refcnt);
3807 uobj_put_read(xrcd_uobj);
3810 err:
3811 uobj_alloc_abort(&obj->uevent.uobject);
3812 return ret;
3815 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3816 struct ib_device *ib_dev,
3817 const char __user *buf, int in_len,
3818 int out_len)
3820 struct ib_uverbs_create_srq cmd;
3821 struct ib_uverbs_create_xsrq xcmd;
3822 struct ib_uverbs_create_srq_resp resp;
3823 struct ib_udata udata;
3824 int ret;
3826 if (out_len < sizeof resp)
3827 return -ENOSPC;
3829 if (copy_from_user(&cmd, buf, sizeof cmd))
3830 return -EFAULT;
3832 memset(&xcmd, 0, sizeof(xcmd));
3833 xcmd.response = cmd.response;
3834 xcmd.user_handle = cmd.user_handle;
3835 xcmd.srq_type = IB_SRQT_BASIC;
3836 xcmd.pd_handle = cmd.pd_handle;
3837 xcmd.max_wr = cmd.max_wr;
3838 xcmd.max_sge = cmd.max_sge;
3839 xcmd.srq_limit = cmd.srq_limit;
3841 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3842 u64_to_user_ptr(cmd.response) + sizeof(resp),
3843 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3844 out_len - sizeof(resp));
3846 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3847 if (ret)
3848 return ret;
3850 return in_len;
3853 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3854 struct ib_device *ib_dev,
3855 const char __user *buf, int in_len, int out_len)
3857 struct ib_uverbs_create_xsrq cmd;
3858 struct ib_uverbs_create_srq_resp resp;
3859 struct ib_udata udata;
3860 int ret;
3862 if (out_len < sizeof resp)
3863 return -ENOSPC;
3865 if (copy_from_user(&cmd, buf, sizeof cmd))
3866 return -EFAULT;
3868 ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
3869 u64_to_user_ptr(cmd.response) + sizeof(resp),
3870 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3871 out_len - sizeof(resp));
3873 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3874 if (ret)
3875 return ret;
3877 return in_len;
3880 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3881 struct ib_device *ib_dev,
3882 const char __user *buf, int in_len,
3883 int out_len)
3885 struct ib_uverbs_modify_srq cmd;
3886 struct ib_udata udata;
3887 struct ib_srq *srq;
3888 struct ib_srq_attr attr;
3889 int ret;
3891 if (copy_from_user(&cmd, buf, sizeof cmd))
3892 return -EFAULT;
3894 ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3895 out_len);
3897 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
3898 if (!srq)
3899 return -EINVAL;
3901 attr.max_wr = cmd.max_wr;
3902 attr.srq_limit = cmd.srq_limit;
3904 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3906 uobj_put_obj_read(srq);
3908 return ret ? ret : in_len;
3911 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3912 struct ib_device *ib_dev,
3913 const char __user *buf,
3914 int in_len, int out_len)
3916 struct ib_uverbs_query_srq cmd;
3917 struct ib_uverbs_query_srq_resp resp;
3918 struct ib_srq_attr attr;
3919 struct ib_srq *srq;
3920 int ret;
3922 if (out_len < sizeof resp)
3923 return -ENOSPC;
3925 if (copy_from_user(&cmd, buf, sizeof cmd))
3926 return -EFAULT;
3928 srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file->ucontext);
3929 if (!srq)
3930 return -EINVAL;
3932 ret = ib_query_srq(srq, &attr);
3934 uobj_put_obj_read(srq);
3936 if (ret)
3937 return ret;
3939 memset(&resp, 0, sizeof resp);
3941 resp.max_wr = attr.max_wr;
3942 resp.max_sge = attr.max_sge;
3943 resp.srq_limit = attr.srq_limit;
3945 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
3946 return -EFAULT;
3948 return in_len;
3951 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3952 struct ib_device *ib_dev,
3953 const char __user *buf, int in_len,
3954 int out_len)
3956 struct ib_uverbs_destroy_srq cmd;
3957 struct ib_uverbs_destroy_srq_resp resp;
3958 struct ib_uobject *uobj;
3959 struct ib_uevent_object *obj;
3960 int ret = -EINVAL;
3962 if (copy_from_user(&cmd, buf, sizeof cmd))
3963 return -EFAULT;
3965 uobj = uobj_get_write(UVERBS_OBJECT_SRQ, cmd.srq_handle,
3966 file->ucontext);
3967 if (IS_ERR(uobj))
3968 return PTR_ERR(uobj);
3970 obj = container_of(uobj, struct ib_uevent_object, uobject);
3971 /*
3972 * Make sure we don't free the memory in remove_commit as we still
3973 * need the uobject memory to create the response.
3974 */
3975 uverbs_uobject_get(uobj);
3977 memset(&resp, 0, sizeof(resp));
3979 ret = uobj_remove_commit(uobj);
3980 if (ret) {
3981 uverbs_uobject_put(uobj);
3982 return ret;
3984 resp.events_reported = obj->events_reported;
3985 uverbs_uobject_put(uobj);
3986 if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
3987 return -EFAULT;
3989 return in_len;
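/*
 * Editor's note: extended query-device fills its response incrementally:
 * each optional capability block is copied only if the user buffer is
 * large enough for everything reported so far plus that block, and
 * resp.response_length grows with each one. Older userspace with a short
 * buffer therefore receives exactly the prefix it asked for.
 */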
3992 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3993 struct ib_device *ib_dev,
3994 struct ib_udata *ucore,
3995 struct ib_udata *uhw)
3997 struct ib_uverbs_ex_query_device_resp resp = { {0} };
3998 struct ib_uverbs_ex_query_device cmd;
3999 struct ib_device_attr attr = {0};
4000 int err;
4002 if (!ib_dev->query_device)
4003 return -EOPNOTSUPP;
4005 if (ucore->inlen < sizeof(cmd))
4006 return -EINVAL;
4008 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
4009 if (err)
4010 return err;
4012 if (cmd.comp_mask)
4013 return -EINVAL;
4015 if (cmd.reserved)
4016 return -EINVAL;
4018 resp.response_length = offsetof(typeof(resp), odp_caps);
4020 if (ucore->outlen < resp.response_length)
4021 return -ENOSPC;
4023 err = ib_dev->query_device(ib_dev, &attr, uhw);
4024 if (err)
4025 return err;
4027 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
4029 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
4030 goto end;
4032 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4033 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
4034 resp.odp_caps.per_transport_caps.rc_odp_caps =
4035 attr.odp_caps.per_transport_caps.rc_odp_caps;
4036 resp.odp_caps.per_transport_caps.uc_odp_caps =
4037 attr.odp_caps.per_transport_caps.uc_odp_caps;
4038 resp.odp_caps.per_transport_caps.ud_odp_caps =
4039 attr.odp_caps.per_transport_caps.ud_odp_caps;
4040 #endif
4041 resp.response_length += sizeof(resp.odp_caps);
4043 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
4044 goto end;
4046 resp.timestamp_mask = attr.timestamp_mask;
4047 resp.response_length += sizeof(resp.timestamp_mask);
4049 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
4050 goto end;
4052 resp.hca_core_clock = attr.hca_core_clock;
4053 resp.response_length += sizeof(resp.hca_core_clock);
4055 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
4056 goto end;
4058 resp.device_cap_flags_ex = attr.device_cap_flags;
4059 resp.response_length += sizeof(resp.device_cap_flags_ex);
4061 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
4062 goto end;
4064 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
4065 resp.rss_caps.max_rwq_indirection_tables =
4066 attr.rss_caps.max_rwq_indirection_tables;
4067 resp.rss_caps.max_rwq_indirection_table_size =
4068 attr.rss_caps.max_rwq_indirection_table_size;
4070 resp.response_length += sizeof(resp.rss_caps);
4072 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
4073 goto end;
4075 resp.max_wq_type_rq = attr.max_wq_type_rq;
4076 resp.response_length += sizeof(resp.max_wq_type_rq);
4078 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
4079 goto end;
4081 resp.raw_packet_caps = attr.raw_packet_caps;
4082 resp.response_length += sizeof(resp.raw_packet_caps);
4084 if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
4085 goto end;
4087 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
4088 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
4089 resp.tm_caps.max_ops = attr.tm_caps.max_ops;
4090 resp.tm_caps.max_sge = attr.tm_caps.max_sge;
4091 resp.tm_caps.flags = attr.tm_caps.flags;
4092 resp.response_length += sizeof(resp.tm_caps);
4094 if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
4095 goto end;
4097 resp.cq_moderation_caps.max_cq_moderation_count =
4098 attr.cq_caps.max_cq_moderation_count;
4099 resp.cq_moderation_caps.max_cq_moderation_period =
4100 attr.cq_caps.max_cq_moderation_period;
4101 resp.response_length += sizeof(resp.cq_moderation_caps);
4103 if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size))
4104 goto end;
4106 resp.max_dm_size = attr.max_dm_size;
4107 resp.response_length += sizeof(resp.max_dm_size);
4108 end:
4109 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
4110 return err;
4113 int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
4114 struct ib_device *ib_dev,
4115 struct ib_udata *ucore,
4116 struct ib_udata *uhw)
4118 struct ib_uverbs_ex_modify_cq cmd = {};
4119 struct ib_cq *cq;
4120 size_t required_cmd_sz;
4121 int ret;
4123 required_cmd_sz = offsetof(typeof(cmd), reserved) +
4124 sizeof(cmd.reserved);
4125 if (ucore->inlen < required_cmd_sz)
4126 return -EINVAL;
4128 /* sanity checks */
4129 if (ucore->inlen > sizeof(cmd) &&
4130 !ib_is_udata_cleared(ucore, sizeof(cmd),
4131 ucore->inlen - sizeof(cmd)))
4132 return -EOPNOTSUPP;
4134 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
4135 if (ret)
4136 return ret;
4138 if (!cmd.attr_mask || cmd.reserved)
4139 return -EINVAL;
4141 if (cmd.attr_mask > IB_CQ_MODERATE)
4142 return -EOPNOTSUPP;
4144 cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file->ucontext);
4145 if (!cq)
4146 return -EINVAL;
4148 ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
4150 uobj_put_obj_read(cq);
4152 return ret;