// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
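
/* Validate a requested CQ depth against device limits. @cq is non-NULL
 * only when an existing CQ is being resized, in which case the new depth
 * must also hold every completion currently in the queue. @comp_vector
 * is accepted but not checked here.
 */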
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}
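
/* Tasklet body: deliver the completion upcall to the consumer, unless
 * the CQ is being torn down. The is_dying flag is re-checked under
 * cq_lock so the handler is never invoked after rxe_cq_disable().
 */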
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
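
/* Allocate the completion ring for a new CQ and, for a userspace CQ
 * (uresp != NULL), export the buffer through mmap info so the consumer
 * can poll it directly.
 */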
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}
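
/* Resize the completion ring in place. cq_lock is passed down so the
 * buffer swap is serialised against rxe_cq_post(); ibcq.cqe is updated
 * only when the resize succeeds.
 */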
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}
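
/* Producer side: copy one CQE into the ring under cq_lock. On overflow,
 * report IB_EVENT_CQ_ERR to the consumer's event handler (the lock is
 * dropped first, presumably so the handler can safely re-enter CQ code)
 * and fail with -EBUSY. Otherwise schedule the completion tasklet when
 * the armed notify state (IB_CQ_NEXT_COMP, or IB_CQ_SOLICITED for a
 * solicited entry) calls for it.
 */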
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}
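
/* Mark the CQ as dying under cq_lock so a concurrent or later run of
 * rxe_send_complete() will not invoke the completion handler again.
 */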
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}
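
/* Pool destructor: free the completion ring, if one was allocated. */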
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}