drivers/infiniband/sw/rxe/rxe_cq.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
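
/* Check that a CQ of @cqe entries fits within the device limits and, when
 * an existing CQ is being resized, that it can still hold the completions
 * already queued.
 */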
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
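
/* Completion tasklet: invoke the CQ's completion handler unless the CQ is
 * being torn down.
 */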
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
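
/* Initialize a new CQ: allocate the completion queue, hand the mmap info
 * back to userspace when a user response buffer is supplied, and set up
 * the completion tasklet and lock.
 */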
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}
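
/* Resize the completion queue; update ibcq.cqe only if the resize
 * succeeds.
 */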
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}
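
/* Post a completion to the CQ. On overflow, report IB_EVENT_CQ_ERR to the
 * event handler and return -EBUSY; otherwise copy the CQE into the queue
 * and schedule the completion tasklet if the pending notification request
 * matches (next completion, or a solicited completion when @solicited is
 * set).
 */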
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		/* drop the lock before calling into the event handler */
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}
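
/* Mark the CQ as dying so the completion tasklet stops invoking the
 * completion handler.
 */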
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}
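
/* Pool teardown hook: free the completion queue when the CQ object is
 * destroyed.
 */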
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}