/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

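/* Validate CQ attributes: the requested depth must be positive, within the
 * device limit, and (when resizing an existing CQ) not smaller than the
 * number of completions currently queued.
 */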
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		return -EINVAL;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		return -EINVAL;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)",
				cqe, count);
			return -EINVAL;
		}
	}

	return 0;
}

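/* Tasklet callback: invoke the consumer's completion handler unless the CQ
 * is being torn down.
 */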
static void rxe_send_complete(unsigned long data)
{
	struct rxe_cq *cq = (struct rxe_cq *)data;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

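/* Allocate the completion queue ring and, for user verbs, set up the mmap
 * info that lets userspace map the queue buffer.
 */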
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	cq->is_dying = false;

	tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);

	spin_lock_init(&cq->cq_lock);
	return 0;
}

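/* Resize the completion queue ring; the resize is serialized against
 * rxe_cq_post() through cq_lock.
 */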
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

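/* Post a completion to the CQ. If the ring is full, report IB_EVENT_CQ_ERR
 * through the event handler and fail; otherwise copy the CQE into the ring,
 * advance the producer index and, if the consumer requested notification,
 * schedule the completion tasklet.
 */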
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	/* make sure all changes to the CQ are written before we update the
	 * producer index
	 */
	smp_wmb();

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

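/* Mark the CQ as dying so the completion tasklet stops calling back into the
 * consumer while the CQ is being destroyed.
 */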
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

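/* Pool cleanup callback: release the CQ's queue buffer. */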
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}