/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"
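
/*
 * Note on the doorbell path used throughout this file: the
 * PVRDMA_UAR_CQ_* values are operation bits OR'd into the CQ handle and
 * written to the device's UAR page via pvrdma_write_uar_cq() (declared
 * in pvrdma.h), i.e.
 *
 *	cq->cq_handle | PVRDMA_UAR_CQ_ARM	arm for any completion
 *	cq->cq_handle | PVRDMA_UAR_CQ_ARM_SOL	arm for solicited only
 *	cq->cq_handle | PVRDMA_UAR_CQ_POLL	ask the device to poll
 */
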
/**
 * pvrdma_req_notify_cq - request notification for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: notification flags
 *
 * @return: 0 for success.
 */
int pvrdma_req_notify_cq(struct ib_cq *ibcq,
			 enum ib_cq_notify_flags notify_flags)
{
	struct pvrdma_dev *dev = to_vdev(ibcq->device);
	struct pvrdma_cq *cq = to_vcq(ibcq);
	u32 val = cq->cq_handle;
	unsigned long flags;
	int has_data = 0;

	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

	spin_lock_irqsave(&cq->cq_lock, flags);

	pvrdma_write_uar_cq(dev, val);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned int head;

		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
						    cq->ibcq.cqe, &head);
		if (unlikely(has_data == PVRDMA_INVALID_IDX))
			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return has_data;
}
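
/*
 * CQ memory layout, as set up by pvrdma_create_cq() below: the CQ is
 * backed by a pvrdma_page_dir whose first page holds the ring state
 * (producer/consumer indices) shared with the device, while the
 * remaining pages hold the 64-byte CQEs. For a userspace CQ the pages
 * are pinned from the buffer the library passes in via struct
 * pvrdma_create_cq, and the library itself sets up the ring-state page.
 */
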
/**
 * pvrdma_create_cq - create completion queue
 * @ibdev: the device
 * @attr: completion queue attributes
 * @context: user context
 * @udata: user data
 *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise returns negative errno.
 */
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct pvrdma_cq *cq;
	int ret;
	int npages;
	unsigned long flags;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
	struct pvrdma_create_cq_resp cq_resp = {0};
	struct pvrdma_create_cq ucmd;

	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

	entries = roundup_pow_of_two(entries);
	if (entries < 1 || entries > dev->dsr->caps.max_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
		return ERR_PTR(-ENOMEM);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		atomic_dec(&dev->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	cq->ibcq.cqe = entries;
	cq->is_kernel = !context;

	if (!cq->is_kernel) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_cq;
		}

		cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			ret = PTR_ERR(cq->umem);
			goto err_cq;
		}

		npages = ib_umem_page_count(cq->umem);
	} else {
		/* One extra page for shared ring state */
		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
			      PAGE_SIZE - 1) / PAGE_SIZE;

		/* Skip header page. */
		cq->offset = PAGE_SIZE;
	}

	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in completion queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	/* Ring state is always the first page. Set in library for user cq. */
	if (cq->is_kernel)
		cq->ring_state = cq->pdir.pages[0];
	else
		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

	refcount_set(&cq->refcnt, 1);
	init_completion(&cq->free);
	spin_lock_init(&cq->cq_lock);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
	cmd->nchunks = npages;
	cmd->ctx_handle = (context) ?
		(u64)to_vucontext(context)->ctx_handle : 0;
	cmd->cqe = entries;
	cmd->pdir_dma = cq->pdir.dir_dma;
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create completion queue, error: %d\n",
			 ret);
		goto err_page_dir;
	}

	cq->ibcq.cqe = resp->cqe;
	cq->cq_handle = resp->cq_handle;
	cq_resp.cqn = resp->cq_handle;
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (!cq->is_kernel) {
		cq->uar = &(to_vucontext(context)->uar);

		/* Copy udata back. */
		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back udata\n");
			pvrdma_destroy_cq(&cq->ibcq);
			return ERR_PTR(-EINVAL);
		}
	}

	return &cq->ibcq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
	if (!cq->is_kernel)
		ib_umem_release(cq->umem);
err_cq:
	atomic_dec(&dev->num_cqs);
	kfree(cq);

	return ERR_PTR(ret);
}
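
/*
 * Free a CQ once all references have been dropped. The completion
 * interrupt path takes its own reference around the consumer's handler
 * (pvrdma_cq_event() in pvrdma_main.c drops it and signals cq->free),
 * so waiting here ensures no handler is still running when the CQ
 * memory is released.
 */
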
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
	if (refcount_dec_and_test(&cq->refcnt))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	if (!cq->is_kernel)
		ib_umem_release(cq->umem);

	pvrdma_page_dir_cleanup(dev, &cq->pdir);
	kfree(cq);
}

/**
 * pvrdma_destroy_cq - destroy completion queue
 * @cq: the completion queue to destroy.
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_cq(struct ib_cq *cq)
{
	struct pvrdma_cq *vcq = to_vcq(cq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
	struct pvrdma_dev *dev = to_vdev(cq->device);
	unsigned long flags;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
	cmd->cq_handle = vcq->cq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not destroy completion queue, error: %d\n",
			 ret);

	/* free cq's resources */
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[vcq->cq_handle] = NULL;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	pvrdma_free_cq(dev, vcq);
	atomic_dec(&dev->num_cqs);

	return ret;
}

/**
 * pvrdma_modify_cq - modify the CQ moderation parameters
 * @cq: the CQ to modify
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 *
 * @return: -EOPNOTSUPP as CQ modification is not supported.
 */
int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}

static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
	return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
					&cq->pdir,
					cq->offset +
					sizeof(struct pvrdma_cqe) * i);
}
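
/*
 * Flush the CQEs of a QP that is being destroyed. Sketch of the scheme,
 * matching the loop below: walk the ring backwards from the producer
 * tail toward the consumer head; CQEs belonging to other QPs are
 * compacted toward the tail so they survive, while CQEs of the dying QP
 * are dropped by advancing the shared consumer index. Caller must hold
 * cq->cq_lock.
 */
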
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
	unsigned int head;
	int has_data;

	if (!cq->is_kernel)
		return;

	/* Lock held */
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (unlikely(has_data > 0)) {
		int items;
		int curr;
		int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
				      cq->ibcq.cqe);
		struct pvrdma_cqe *cqe;
		struct pvrdma_cqe *curr_cqe;

		items = (tail > head) ? (tail - head) :
			(cq->ibcq.cqe - head + tail);
		curr = --tail;
		while (items-- > 0) {
			if (curr < 0)
				curr = cq->ibcq.cqe - 1;
			if (tail < 0)
				tail = cq->ibcq.cqe - 1;
			curr_cqe = get_cqe(cq, curr);
			if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
				if (curr != tail) {
					cqe = get_cqe(cq, tail);
					*cqe = *curr_cqe;
				}
				tail--;
			} else {
				pvrdma_idx_ring_inc(
					&cq->ring_state->rx.cons_head,
					cq->ibcq.cqe);
			}
			curr--;
		}
	}
}

static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
			   struct ib_wc *wc)
{
	struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
	int has_data;
	unsigned int head;
	bool tried = false;
	struct pvrdma_cqe *cqe;

retry:
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (has_data == 0) {
		if (tried)
			return -EAGAIN;

		/* Pass down POLL to give physical HCA a chance to poll. */
		pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);

		tried = true;
		goto retry;
	} else if (has_data == PVRDMA_INVALID_IDX) {
		dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
		return -EAGAIN;
	}

	cqe = get_cqe(cq, head);

	/* Ensure cqe is valid. */
	rmb();
	if (dev->qp_tbl[cqe->qp & 0xffff])
		*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
	else
		return -EAGAIN;

	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
	wc->status = pvrdma_wc_status_to_ib(cqe->status);
	wc->wr_id = cqe->wr_id;
	wc->qp = &(*cur_qp)->ibqp;
	wc->byte_len = cqe->byte_len;
	wc->ex.imm_data = cqe->imm_data;
	wc->src_qp = cqe->src_qp;
	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->slid;
	wc->sl = cqe->sl;
	wc->dlid_path_bits = cqe->dlid_path_bits;
	wc->port_num = cqe->port_num;
	wc->vendor_err = cqe->vendor_err;
	wc->network_hdr_type = cqe->network_hdr_type;

	/* Update shared ring state */
	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

	return 0;
}
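
/*
 * Illustrative sketch only, kept out of the build: how a kernel consumer
 * reaches the entry points in this file through the ib_ core wrappers.
 * example_drain_cq is a hypothetical helper, not part of this driver.
 */
#if 0
static void example_drain_cq(struct ib_cq *ibcq)
{
	struct ib_wc wc[8];
	int n, i;

	do {
		/* ib_poll_cq() dispatches to pvrdma_poll_cq(). */
		while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0) {
			for (i = 0; i < n; i++) {
				if (wc[i].status != IB_WC_SUCCESS)
					pr_warn("wr_id %llu: status %d\n",
						wc[i].wr_id, wc[i].status);
			}
		}
		/*
		 * Re-arm via pvrdma_req_notify_cq(); a positive return
		 * means completions raced in and must be drained again.
		 */
	} while (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif
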
/**
 * pvrdma_poll_cq - poll for work completion queue entries
 * @ibcq: completion queue
 * @num_entries: the maximum number of entries
 * @wc: pointer to work completion array
 *
 * @return: number of polled completion entries
 */
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct pvrdma_cq *cq = to_vcq(ibcq);
	struct pvrdma_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	if (num_entries < 1 || wc == NULL)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	/* Ensure we do not return errors from poll_cq */
	return npolled;
}

/**
 * pvrdma_resize_cq - resize CQ
 * @ibcq: the completion queue
 * @entries: CQ entries
 * @udata: user data
 *
 * @return: -EOPNOTSUPP as CQ resize is not supported.
 */
int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	return -EOPNOTSUPP;
}