/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/uaccess.h>

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
#include "rdma_core.h"

#include "uverbs.h"
#include "core_priv.h"
static struct ib_uverbs_completion_event_file *
ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
{
	struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
						fd, context);
	struct ib_uobject_file *uobj_file;

	if (IS_ERR(uobj))
		return (void *)uobj;

	/*
	 * Take a plain kref on the uobject before dropping the read lock,
	 * so the completion file cannot be destroyed while it is in use.
	 */
	uverbs_uobject_get(uobj);
	uobj_put_read(uobj);

	uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
	return container_of(uobj_file, struct ib_uverbs_completion_event_file,
			    uobj_file);
}
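
/*
 * GET_CONTEXT: allocate the per-open ucontext, charge it to the RDMA
 * cgroup, and return an event fd for asynchronous events alongside
 * num_comp_vectors. Only one ucontext may exist per uverbs file,
 * hence the -EINVAL below when file->ucontext is already set.
 */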
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	struct ib_rdmacg_object           cg_obj;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
	if (ret)
		goto err;

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err_alloc;
	}

	ucontext->device = ib_dev;
	ucontext->cg_obj = cg_obj;
	/* ufile is required when some objects are released */
	ucontext->ufile = file;
	uverbs_initialize_ucontext(ucontext);

	rcu_read_lock();
	ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err_alloc:
	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

err:
	mutex_unlock(&file->mutex);
	return ret;
}
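
/*
 * QUERY_DEVICE reports the cached device attributes. The legacy
 * response only carries a 32-bit flags field, which is why
 * copy_query_dev_fields() truncates device_cap_flags with
 * lower_32_bits(); the extended query command exposes the full
 * 64-bit mask.
 */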
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver = attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
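
/*
 * QUERY_PORT: note the LID handling below. On OPA-capable ports the
 * core keeps 32-bit LIDs, so they are converted to 16-bit IB unicast
 * LIDs for the legacy response; plain IB ports just narrow the
 * CPU-order value with ib_lid_cpu16().
 */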
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;

	if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
		resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
		resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
	} else {
		resp.lid = ib_lid_cpu16(attr.lid);
		resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
	}

	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(ib_dev,
						   cmd.port_num);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
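
/*
 * ib_uverbs_alloc_pd() is the simplest example of the idr-based
 * uobject lifecycle that most create-style handlers below follow
 * (a sketch of the pattern, not additional functionality):
 *
 *	uobj = uobj_alloc(type, ucontext);  // reserve handle, not yet visible
 *	obj = driver->create(...);          // create the verbs object
 *	uobj->object = obj;
 *	copy_to_user(resp);                 // report the handle value
 *	uobj_alloc_commit(uobj);            // success: handle goes live
 *	...
 *	uobj_alloc_abort(uobj);             // failure: handle never existed
 */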
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dealloc_pd(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
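
/*
 * XRC domains can be shared between processes by passing around an fd
 * for a file both sides can open. The rb-tree below maps that file's
 * inode to the kernel ib_xrcd, so a second OPEN_XRCD on the same inode
 * finds the existing domain instead of allocating a new one.
 */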
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}
static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}
static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd      cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata                 udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd                       f = {NULL, 0};
	struct inode                   *inode = NULL;
	int                             ret = 0;
	int                             new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
						   file->ucontext);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_tree_mutex_unlock;
	}

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_dealloc_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_dealloc_xrcd:
	ib_dealloc_xrcd(xrcd);

err:
	uobj_alloc_abort(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject          *uobj;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
	/*
	 * xrcd_tree_mutex is never taken on this path, so (unlike older
	 * versions of this handler) do not unlock it on error.
	 */
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			   struct ib_xrcd *xrcd,
			   enum rdma_remove_reason why)
{
	struct inode *inode;
	int ret;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return 0;

	ret = ib_dealloc_xrcd(xrcd);

	if (why == RDMA_REMOVE_DESTROY && ret)
		atomic_inc(&xrcd->usecnt);
	else if (inode)
		xrcd_table_delete(dev, inode);

	return ret;
}
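
/*
 * REG_MR: the driver registers user memory at an HCA virtual address.
 * Only the offset within a page is fixed by the hardware mapping,
 * which is why the handler below merely requires cmd.start and
 * cmd.hca_va to agree modulo PAGE_SIZE (the "& ~PAGE_MASK" check)
 * rather than to be equal.
 */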
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);

	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	ib_dereg_mr(mr);

err_put:
	uobj_put_obj_read(pd);

err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		uobj_put_obj_read(pd);

put_uobjs:
	uobj_put_write(uobj);

	return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);

	return ret ?: in_len;
}
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	struct ib_udata                udata;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	uverbs_dealloc_mw(mw);
err_put:
	uobj_put_obj_read(pd);
err_free:
	uobj_alloc_abort(uobj);
	return ret;
}
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel       cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct ib_uobject                         *uobj;
	struct ib_uverbs_completion_event_file    *ev_file;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	resp.fd = uobj->id;

	ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
			       uobj_file.uobj);
	ib_uverbs_init_event_queue(&ev_file->ev_queue);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		uobj_alloc_abort(uobj);
		return -EFAULT;
	}

	uobj_alloc_commit(uobj);
	return in_len;
}
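
/*
 * create_cq() is shared by the legacy and extended create-CQ commands.
 * cmd_sz says how much of struct ib_uverbs_ex_create_cq the caller
 * actually provided, so newer fields such as cmd->flags are only read
 * when the offsetof() check shows userspace sent them; the response is
 * written back through the per-command callback because the two ABIs
 * copy different amounts of it.
 */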
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object                   *obj;
	struct ib_uverbs_completion_event_file *ev_file = NULL;
	struct ib_cq                           *cq;
	int                                     ret;
	struct ib_uverbs_ex_create_cq_resp      resp;
	struct ib_cq_init_attr attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
						 file->ucontext);
	if (IS_ERR(obj))
		return obj;

	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
						     file->ucontext);
		if (IS_ERR(ev_file)) {
			ret = PTR_ERR(ev_file);
			goto err;
		}
	}

	obj->uobject.user_handle   = cmd->user_handle;
	obj->uverbs_file           = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	return obj;

err_cb:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	uobj_alloc_abort(&obj->uobject);

	return ERR_PTR(ret);
}
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq   cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), sizeof(resp));

	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle  = cmd.user_handle;
	cmd_ex.cqe          = cmd.cqe;
	cmd_ex.comp_vector  = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq      cmd;
	struct ib_ucq_object              *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp = {};
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* only the cqe field of the response is defined for this command */
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	uobj_put_obj_read(cq);

	return ret ? ret : in_len;
}
static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
			   struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id          = wc->wr_id;
	tmp.status         = wc->status;
	tmp.opcode         = wc->opcode;
	tmp.vendor_err     = wc->vendor_err;
	tmp.byte_len       = wc->byte_len;
	tmp.ex.imm_data    = wc->ex.imm_data;
	tmp.qp_num         = wc->qp->qp_num;
	tmp.src_qp         = wc->src_qp;
	tmp.wc_flags       = wc->wc_flags;
	tmp.pkey_index     = wc->pkey_index;
	if (rdma_cap_opa_ah(ib_dev, wc->port_num))
		tmp.slid   = OPA_TO_IB_UCAST_LID(wc->slid);
	else
		tmp.slid   = ib_lid_cpu16(wc->slid);
	tmp.sl             = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num       = wc->port_num;
	tmp.reserved       = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}
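
/*
 * POLL_CQ: completions are copied out one at a time through
 * copy_wc_to_user(), building a struct ib_uverbs_poll_cq_resp header
 * followed by a packed array of struct ib_uverbs_wc in the user
 * buffer; the header with the final resp.count is only written once
 * polling stops.
 */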
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = u64_to_user_ptr(cmd.response);
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	uobj_put_obj_read(cq);
	return ret;
}
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	uobj_put_obj_read(cq);

	return in_len;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);
	cq = uobj->object;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
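
/*
 * create_qp() is the common worker for the legacy and extended
 * create-QP commands. For IB_QPT_XRC_TGT the "pd_handle" field
 * actually carries an XRCD handle and no PD/CQ/SRQ references are
 * taken; when a receive-work-queue indirection table is supplied, the
 * QP has no receive queue (and, if max_send_wr is zero, no send queue)
 * of its own.
 */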
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object              *obj;
	struct ib_device                  *device;
	struct ib_pd                      *pd = NULL;
	struct ib_xrcd                    *xrcd = NULL;
	struct ib_uobject                 *xrcd_uobj = ERR_PTR(-ENOENT);
	struct ib_cq                      *scq = NULL, *rcq = NULL;
	struct ib_srq                     *srq = NULL;
	struct ib_qp                      *qp;
	char                              *buf;
	struct ib_qp_init_attr             attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int                                ret;
	struct ib_rwq_ind_table           *ind_tbl = NULL;
	bool                               has_sq = true;

	if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	obj->uxrcd = NULL;
	obj->uevent.uobject.user_handle = cmd->user_handle;
	mutex_init(&obj->mcast_lock);

	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
	    (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = uobj_get_obj_read(rwq_ind_table,
					    cmd->rwq_ind_tbl_handle,
					    file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if (cmd_sz > sizeof(*cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(*cmd),
				 cmd_sz - sizeof(*cmd))) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
					  file->ucontext);

		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err_put;
		}

		xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = uobj_get_obj_read(srq, cmd->srq_handle,
							file->ucontext);
				if (!srq || srq->srq_type == IB_SRQT_XRC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
								file->ucontext);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
						file->ucontext);
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd          = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					      IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported     = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS |
				  IB_QP_CREATE_CVLAN_STRIPPING |
				  IB_QP_CREATE_SOURCE_QPN |
				  IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			goto err_put;
		}

		attr.source_qpn = cmd->source_qpn;
	}

	/* any trailing driver data beyond the known command must be zero */
	buf = (void *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		ret = ib_create_qp_security(qp, device);
		if (ret)
			goto err_cb;

		qp->real_qp       = qp;
		qp->pd            = pd;
		qp->send_cq       = attr.send_cq;
		qp->recv_cq       = attr.recv_cq;
		qp->srq           = attr.srq;
		qp->rwq_ind_tbl   = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context    = attr.qp_context;
		qp->qp_type       = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		qp->port = 0;
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;
err_cb:
	ib_destroy_qp(qp);

err_put:
	if (!IS_ERR(xrcd_uobj))
		uobj_put_read(xrcd_uobj);
	if (pd)
		uobj_put_obj_read(pd);
	if (scq)
		uobj_put_obj_read(scq);
	if (rcq && rcq != scq)
		uobj_put_obj_read(rcq);
	if (srq)
		uobj_put_obj_read(srq);
	if (ind_tbl)
		uobj_put_obj_read(ind_tbl);

	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp    cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata               ucore;
	struct ib_udata               uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
			     sizeof(cmd), resp_size);
	ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle     = cmd.user_handle;
	cmd_ex.pd_handle       = cmd.pd_handle;
	cmd_ex.send_cq_handle  = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle  = cmd.recv_cq_handle;
	cmd_ex.srq_handle      = cmd.srq_handle;
	cmd_ex.max_send_wr     = cmd.max_send_wr;
	cmd_ex.max_recv_wr     = cmd.max_recv_wr;
	cmd_ex.max_send_sge    = cmd.max_send_sge;
	cmd_ex.max_recv_sge    = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all      = cmd.sq_sig_all;
	cmd_ex.qp_type         = cmd.qp_type;
	cmd_ex.is_srq          = cmd.is_srq;

	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp      cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
				  file->ucontext);
	if (IS_ERR(xrcd_uobj)) {
		ret = -EINVAL;
		goto err_put;
	}

	xrcd = (struct ib_xrcd *)xrcd_uobj->object;
	if (!xrcd) {
		ret = -EINVAL;
		goto err_xrcd;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_xrcd;
	}

	obj->uevent.uobject.object = qp;
	obj->uevent.uobject.user_handle = cmd.user_handle;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_destroy;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	qp->uobject = &obj->uevent.uobject;
	uobj_put_read(xrcd_uobj);

	uobj_alloc_commit(&obj->uevent.uobject);

	return in_len;

err_destroy:
	ib_destroy_qp(qp);
err_xrcd:
	uobj_put_read(xrcd_uobj);
err_put:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}
static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
				   struct rdma_ah_attr *rdma_attr)
{
	const struct ib_global_route *grh;

	uverb_attr->dlid          = rdma_ah_get_dlid(rdma_attr);
	uverb_attr->sl            = rdma_ah_get_sl(rdma_attr);
	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
	uverb_attr->static_rate   = rdma_ah_get_static_rate(rdma_attr);
	uverb_attr->is_global     = !!(rdma_ah_get_ah_flags(rdma_attr) &
				       IB_AH_GRH);
	if (uverb_attr->is_global) {
		grh = rdma_ah_read_grh(rdma_attr);
		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
		uverb_attr->flow_label    = grh->flow_label;
		uverb_attr->sgid_index    = grh->sgid_index;
		uverb_attr->hop_limit     = grh->hop_limit;
		uverb_attr->traffic_class = grh->traffic_class;
	}
	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
}
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	uobj_put_obj_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

	resp.max_send_wr      = init_attr->cap.max_send_wr;
	resp.max_recv_wr      = init_attr->cap.max_recv_wr;
	resp.max_send_sge     = init_attr->cap.max_send_sge;
	resp.max_recv_sge     = init_attr->cap.max_recv_sge;
	resp.max_inline_data  = init_attr->cap.max_inline_data;
	resp.sq_sig_all       = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}
static void copy_ah_attr_from_uverbs(struct ib_device *dev,
				     struct rdma_ah_attr *rdma_attr,
				     struct ib_uverbs_qp_dest *uverb_attr)
{
	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
	if (uverb_attr->is_global) {
		rdma_ah_set_grh(rdma_attr, NULL,
				uverb_attr->flow_label,
				uverb_attr->sgid_index,
				uverb_attr->hop_limit,
				uverb_attr->traffic_class);
		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
	} else {
		rdma_ah_set_ah_flags(rdma_attr, 0);
	}
	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
	rdma_ah_set_make_grd(rdma_attr, false);
}
static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
	struct ib_qp_attr *attr;
	struct ib_qp      *qp;
	int                ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	if ((cmd->base.attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	attr->qp_state            = cmd->base.qp_state;
	attr->cur_qp_state        = cmd->base.cur_qp_state;
	attr->path_mtu            = cmd->base.path_mtu;
	attr->path_mig_state      = cmd->base.path_mig_state;
	attr->qkey                = cmd->base.qkey;
	attr->rq_psn              = cmd->base.rq_psn;
	attr->sq_psn              = cmd->base.sq_psn;
	attr->dest_qp_num         = cmd->base.dest_qp_num;
	attr->qp_access_flags     = cmd->base.qp_access_flags;
	attr->pkey_index          = cmd->base.pkey_index;
	attr->alt_pkey_index      = cmd->base.alt_pkey_index;
	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd->base.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd->base.min_rnr_timer;
	attr->port_num            = cmd->base.port_num;
	attr->timeout             = cmd->base.timeout;
	attr->retry_cnt           = cmd->base.retry_cnt;
	attr->rnr_retry           = cmd->base.rnr_retry;
	attr->alt_port_num        = cmd->base.alt_port_num;
	attr->alt_timeout         = cmd->base.alt_timeout;
	attr->rate_limit          = cmd->rate_limit;

	if (cmd->base.attr_mask & IB_QP_AV)
		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
					 &cmd->base.dest);

	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
					 &cmd->base.alt_dest);

	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
						     cmd->base.attr_mask),
				      udata);

release_qp:
	uobj_put_obj_read(qp);
out:
	kfree(attr);

	return ret;
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	struct ib_udata               udata;
	int ret;

	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
		return -EFAULT;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
		   in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len);

	ret = modify_qp(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}
int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_qp cmd = {};
	int ret;

	/*
	 * Last bit is reserved for extending the attr_mask by
	 * using another field.
	 */
	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));

	if (ucore->inlen < sizeof(cmd.base))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.base.attr_mask &
	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
		return -EOPNOTSUPP;

	if (ucore->inlen > sizeof(cmd)) {
		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
					 ucore->inlen - sizeof(cmd)))
			return -EOPNOTSUPP;
	}

	ret = modify_qp(file, &cmd, uhw);

	return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
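
/*
 * alloc_wr() sizes one kernel work request as the driver WR struct
 * padded to ib_sge alignment plus the caller's scatter/gather list.
 * With S = sizeof(struct ib_sge) and W = ALIGN(wr_size, S), the guard
 * below rejects any num_sge for which W + num_sge * S could exceed
 * U32_MAX, i.e. it requires num_sge < (U32_MAX - W) / S before doing
 * the multiplication.
 */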
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
		       sizeof (struct ib_sge))
		return NULL;

	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
}
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int                             is_ud;
	ssize_t                         ret = -EINVAL;
	size_t                          next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
						   file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(next_size, sizeof(struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	uobj_put_obj_read(qp);

	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			uobj_put_obj_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

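/*
 * Editor's note: the write() payload parsed above is a fixed header
 * followed by two variable arrays; this sketch mirrors the
 * copy_from_user() offsets and is not a separate uAPI declaration:
 *
 *   struct ib_uverbs_post_send cmd;            // sizeof cmd bytes
 *   u8  wqe[cmd.wr_count][cmd.wqe_size];       // one per work request
 *   struct ib_uverbs_sge sge[cmd.sge_count];   // shared SGE pool
 *
 * On a failed post, resp.bad_wr ends up as the 1-based position of the
 * WR the driver reported through bad_wr, so userspace can tell how many
 * of its requests were consumed before the failure.
 */
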
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		if (user_wr->num_sge >=
		    (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
		    sizeof (struct ib_sge)) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

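/*
 * Contract of ib_uverbs_unmarshall_recv(), as used by the two callers
 * below: on success it returns the head of a singly linked list of
 * ib_recv_wr allocations (each with its sg_list living in the same
 * kmalloc block), and the caller both posts and kfree()s the chain; on
 * failure everything built so far is freed here and an ERR_PTR comes
 * back instead.
 */
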
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	uobj_put_obj_read(qp);
	if (ret) {
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}
	}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	uobj_put_obj_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah      cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject              *uobj;
	struct ib_pd                   *pd;
	struct ib_ah                   *ah;
	struct rdma_ah_attr             attr;
	int                             ret;
	struct ib_udata                 udata;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
		return -EINVAL;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
	rdma_ah_set_make_grd(&attr, false);
	rdma_ah_set_dlid(&attr, cmd.attr.dlid);
	rdma_ah_set_sl(&attr, cmd.attr.sl);
	rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
	rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
	rdma_ah_set_port_num(&attr, cmd.attr.port_num);

	if (cmd.attr.is_global) {
		rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
				cmd.attr.grh.sgid_index,
				cmd.attr.grh.hop_limit,
				cmd.attr.grh.traffic_class);
		rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
	} else {
		rdma_ah_set_ah_flags(&attr, 0);
	}

	ah = rdma_create_user_ah(pd, &attr, &udata);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->user_handle = cmd.user_handle;
	uobj->object = ah;

	resp.ah_handle = uobj->id;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	uobj_put_obj_read(pd);
	uobj_alloc_commit(uobj);

	return in_len;

err_copy:
	rdma_destroy_ah(ah);

err_put:
	uobj_put_obj_read(pd);

err:
	uobj_alloc_abort(uobj);
	return ret;
}

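/*
 * Editor's note: cmd.attr.is_global selects between the two address
 * forms built above. With is_global set, the GRH fields (flow label,
 * SGID index, hop limit, traffic class, DGID) describe a routable
 * global path; without it the flags are cleared, no GRH is carried,
 * and, on classic IB ports at least, the AH resolves by DLID within
 * the local subnet only.
 */
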
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	mutex_lock(&obj->mcast_lock);
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);

	return ret ? ret : in_len;
}

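/*
 * The mcast_list walk above makes attach idempotent: a second attach
 * of the same (gid, mlid) pair returns 0 without calling the driver
 * again. The per-QP list itself exists, as far as this editor can
 * tell, so that uverbs can detach any groups still attached when the
 * QP object is torn down behind an exiting or crashed process.
 */
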
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;
	bool                          found = false;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
	mutex_lock(&obj->mcast_lock);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			found = true;
			break;
		}

	if (!found) {
		ret = -EINVAL;
		goto out_put;
	}

	ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);

out_put:
	mutex_unlock(&obj->mcast_lock);
	uobj_put_obj_read(qp);
	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ib_spec->type = kern_spec->type;
	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ACTION_TAG:
		if (kern_spec->flow_tag.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_tag))
			return -EINVAL;

		ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
		ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (kern_spec->drop.size !=
		    sizeof(struct ib_uverbs_flow_spec_action_drop))
			return -EINVAL;

		ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

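/*
 * Worked example of the division above: a spec whose hdr.size is
 * sizeof(hdr) + 2 * 40 carries a 40-byte filter, laid out as the
 * value bytes immediately followed by an equally sized mask. The two
 * halves are split back apart via kern_spec_val / kern_spec_mask in
 * kern_spec_to_ib_spec_filter() below.
 */
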
static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise
	 * this check may pass but additional new attributes will not be
	 * handled.
	 */

	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv(kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

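/*
 * spec_filter_size() is what lets older kernels coexist with newer
 * userspace: a filter longer than the ib_* structure this kernel knows
 * about (ib_real_filter_sz) is accepted only if every extra byte is
 * zero, i.e. the unknown attributes are unused; otherwise the spec is
 * rejected rather than silently truncated.
 */
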
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
				       union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	kern_spec_val = (void *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = kern_spec_val + kern_filter_sz;
	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
		return -EINVAL;

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);

		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
		return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
	else
		return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
}

int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq	  cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
						 file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
	if (!pd) {
		err = -EINVAL;
		goto err_uobj;
	}

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq) {
		err = -EINVAL;
		goto err_put_pd;
	}

	wq_init_attr.cq = cq;
	wq_init_attr.max_sge = cmd.max_sge;
	wq_init_attr.max_wr = cmd.max_wr;
	wq_init_attr.wq_context = file;
	wq_init_attr.wq_type = cmd.wq_type;
	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
	if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
			     sizeof(cmd.create_flags)))
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
		goto err_put_cq;
	}

	wq->uobject = &obj->uevent.uobject;
	obj->uevent.uobject.object = wq;
	wq->wq_type = wq_init_attr.wq_type;
	wq->cq = cq;
	wq->pd = pd;
	wq->device = pd->device;
	wq->wq_context = wq_init_attr.wq_context;
	atomic_set(&wq->usecnt, 0);
	atomic_inc(&pd->usecnt);
	atomic_inc(&cq->usecnt);

	memset(&resp, 0, sizeof(resp));
	resp.wq_handle = obj->uevent.uobject.id;
	resp.max_sge = wq_init_attr.max_sge;
	resp.max_wr = wq_init_attr.max_wr;
	resp.wqn = wq->wq_num;
	resp.response_length = required_resp_len;
	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	uobj_put_obj_read(pd);
	uobj_put_obj_read(cq);
	uobj_alloc_commit(&obj->uevent.uobject);
	return 0;

err_copy:
	ib_destroy_wq(wq);
err_put_cq:
	uobj_put_obj_read(cq);
err_put_pd:
	uobj_put_obj_read(pd);
err_uobj:
	uobj_alloc_abort(&obj->uevent.uobject);

	return err;
}

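/*
 * Editor's note on the extended-command pattern used from here on:
 * required_cmd_sz is the smallest request this kernel can act on,
 * anything the caller supplies past sizeof(cmd) must be zero-filled
 * (unknown future fields are tolerated but never honoured), and
 * resp.response_length reports how much of the response was actually
 * populated, so older userspace and newer kernels can disagree about
 * struct sizes without corrupting each other.
 */
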
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq	cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int				ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	ret = uobj_remove_commit(uobj);
	resp.events_reported = obj->uevent.events_reported;
	uverbs_uobject_put(uobj);
	if (ret)
		return ret;

	return ib_copy_to_udata(ucore, &resp, resp.response_length);
}

int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
		return -EINVAL;

	wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	if (cmd.attr_mask & IB_WQ_FLAGS) {
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	uobj_put_obj_read(wq);
	return ret;
}

int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table	cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp	resp = {};
	struct ib_uobject                 *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	ucore->inbuf += required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
				       file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto put_wqs;
	}

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);

	uobj_alloc_commit(uobj);
	return 0;

err_copy:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	uobj_alloc_abort(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		uobj_put_obj_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

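/*
 * Sizing example for the handle array parsed above, assuming
 * log_ind_tbl_size == 3: the table holds 1 << 3 == 8 __u32 WQ handles,
 * i.e. 32 bytes of input. The single-entry case adds one padding __u32
 * because the uAPI keeps the array 64-bit aligned, which is exactly
 * what the expected_in_size adjustment accounts for.
 */
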
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table	cmd = {};
	struct ib_uobject		*uobj;
	int			ret;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	return uobj_remove_commit(uobj);
}

int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_uverbs_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
	if (IS_ERR(uobj)) {
		err = PTR_ERR(uobj);
		goto err_free_attr;
	}

	qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverbs cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	uobj_put_obj_read(qp);
	uobj_alloc_commit(uobj);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	uobj_put_obj_read(qp);
err_uobj:
	uobj_alloc_abort(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

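/*
 * Editor's sketch of the flow-creation payload walked above; offsets
 * follow the parsing code rather than a separate uAPI declaration:
 *
 *   struct ib_uverbs_flow_attr attr;   // carries num_of_specs, size
 *   // then num_of_specs variable-length blocks, each starting with
 *   // an ib_uverbs_flow_spec_hdr whose .size gives the stride:
 *   //   [ hdr | filter value | filter mask ]   (filter specs)
 *   //   [ hdr | action payload ]               (action specs)
 *
 * The loop keeps a running cmd.flow_attr.size and insists it reaches
 * exactly zero with all specs consumed, so a truncated or padded spec
 * list cannot smuggle stray bytes past the parser.
 */
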
int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = uobj_remove_commit(uobj);
	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
						  file->ucontext);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (cmd->srq_type == IB_SRQT_TM)
		attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;

	if (cmd->srq_type == IB_SRQT_XRC) {
		xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
					  file->ucontext);
		if (IS_ERR(xrcd_uobj)) {
			ret = -EINVAL;
			goto err;
		}

		attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);
	}

	if (ib_srq_has_cq(cmd->srq_type)) {
		attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
						file->ucontext);
		if (!attr.ext.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	obj->uevent.uobject.user_handle = cmd->user_handle;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC)
		uobj_put_read(xrcd_uobj);

	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

	uobj_put_obj_read(pd);
	uobj_alloc_commit(&obj->uevent.uobject);

	return 0;

err_copy:
	ib_destroy_srq(srq);

err_put:
	uobj_put_obj_read(pd);

err_put_cq:
	if (ib_srq_has_cq(cmd->srq_type))
		uobj_put_obj_read(attr.ext.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		uobj_put_read(xrcd_uobj);
	}

err:
	uobj_alloc_abort(&obj->uevent.uobject);
	return ret;
}

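/*
 * One creation path serves all SRQ flavours; which optional objects
 * get resolved depends on cmd->srq_type. To the best of this editor's
 * reading: IB_SRQT_BASIC needs only the PD, IB_SRQT_XRC additionally
 * pins an XRCD, and both XRC and tag-matching SRQs own a completion
 * queue, which is the distinction ib_srq_has_cq() encodes.
 */
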
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&xcmd, 0, sizeof(xcmd));
	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
		   u64_to_user_ptr(cmd.response) + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	uobj_put_obj_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	uobj_put_obj_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_uevent_object        	 *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
			      file->ucontext);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/*
	 * Make sure we don't free the memory in remove_commit as we still
	 * need the uobject memory to create the response.
	 */
	uverbs_uobject_get(uobj);

	memset(&resp, 0, sizeof(resp));

	ret = uobj_remove_commit(uobj);
	if (ret) {
		uverbs_uobject_put(uobj);
		return ret;
	}

	resp.events_reported = obj->events_reported;
	uverbs_uobject_put(uobj);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
		return -EFAULT;

	return in_len;
}

int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);

	if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
		goto end;

	resp.raw_packet_caps = attr.raw_packet_caps;
	resp.response_length += sizeof(resp.raw_packet_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
		goto end;

	resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
	resp.tm_caps.max_num_tags      = attr.tm_caps.max_num_tags;
	resp.tm_caps.max_ops           = attr.tm_caps.max_ops;
	resp.tm_caps.max_sge           = attr.tm_caps.max_sge;
	resp.tm_caps.flags             = attr.tm_caps.flags;
	resp.response_length += sizeof(resp.tm_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
		goto end;

	resp.cq_moderation_caps.max_cq_moderation_count  =
		attr.cq_caps.max_cq_moderation_count;
	resp.cq_moderation_caps.max_cq_moderation_period =
		attr.cq_caps.max_cq_moderation_period;
	resp.response_length += sizeof(resp.cq_moderation_caps);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}

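/*
 * The ladder of "if (ucore->outlen < resp.response_length + ...)"
 * checks above implements incremental capability reporting: each block
 * is appended only when the caller's buffer can hold it, and
 * resp.response_length ends up naming the last byte actually filled,
 * so userspace must treat anything beyond it as absent rather than
 * zero.
 */
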
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_cq cmd = {};
	struct ib_cq *cq;
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), reserved) +
			  sizeof(cmd.reserved);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	/* sanity checks */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask || cmd.reserved)
		return -EINVAL;

	if (cmd.attr_mask > IB_CQ_MODERATE)
		return -EOPNOTSUPP;

	cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
	if (!cq)
		return -EINVAL;

	ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);

	uobj_put_obj_read(cq);

	return ret;
}

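/*
 * Editor's note: with attr_mask limited to IB_CQ_MODERATE, this entry
 * point only tunes interrupt moderation. As commonly implemented,
 * cq_count batches that many completions and cq_period caps the wait
 * in microseconds before an event fires, though the exact semantics
 * are left to the driver behind rdma_set_cq_moderation().
 */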