drivers/infiniband/sw/rxe/rxe_verbs.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
	return 0;
}

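/*
 * Build one receive WQE at the queue's producer slot from an ib_recv_wr
 * and advance the producer index. The caller must hold the receive
 * queue's producer lock.
 */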
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (init->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

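/*
 * Create a queue pair: allocate it from the QP pool, flag user-space
 * QPs, and set up the send and receive queues via rxe_qp_from_init().
 */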
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (init->create_flags)
		return ERR_PTR(-EOPNOTSUPP);

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

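/*
 * Sanity-check a send work request against the queue pair's limits:
 * number of SGEs, atomic length and alignment, and inline data size.
 */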
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

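/*
 * Copy the opcode-specific fields of an ib_send_wr into the driver's
 * rxe_send_wr representation (UD/GSI addressing, RDMA, atomic, memory
 * registration and invalidate variants).
 */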
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

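/*
 * Fill in a send WQE: copy the work request, the address vector for
 * UD/GSI QPs, and either inline data or the SGE list, then record the
 * DMA bookkeeping and assign a send sequence number.
 */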
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

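/*
 * Validate one send work request and copy it into the next free slot
 * of the send queue under the send-queue lock.
 */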
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

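/*
 * Post a chain of send work requests from kernel context, then run the
 * requester task (and the completer task as well when the QP is in the
 * error state). On failure, *bad_wr points at the request that failed.
 */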
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

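/*
 * Post receive work requests on a QP's own receive queue. QPs that are
 * attached to an SRQ must post receives through the SRQ instead.
 */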
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EOPNOTSUPP;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mem_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr_pd(mr));
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

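/*
 * Callback for ib_sg_to_pages(): record one page address in the memory
 * region's buffer map while rxe_map_mr_sg() walks the scatterlist.
 */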
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

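/*
 * Verbs entry points exported to the RDMA core, together with the sizes
 * of the driver-private objects embedded in the core uverbs objects.
 */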
static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.create_user_ah = rxe_create_ah,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

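/*
 * Fill in the ib_device fields, allocate the crc32 shash used for ICRC
 * calculation, and register the device with the RDMA core.
 */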
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);

	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}