/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
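
/*
 * Each CQ is backed by a single circular buffer of completion entries
 * plus head/tail indices (struct ipath_cq_wc).  The buffer is allocated
 * with vmalloc_user() so it can be mmap()'ed into the owning process;
 * the kernel fills entries and advances the head, while the consumer
 * (user space, or ipath_poll_cq() for kernel CQs) advances the tail.
 */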

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
        struct ipath_cq_wc *wc;
        unsigned long flags;
        u32 head;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Note that the head pointer might be writable by user processes.
         * Take care to verify it is a sane value.
         */
        wc = cq->queue;
        head = wc->head;
        if (head >= (unsigned) cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else
                next = head + 1;
        if (unlikely(next == wc->tail)) {
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
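        /*
         * Copy the completion field by field into the queue entry; the
         * entries live in the user-mappable buffer and store a QP number,
         * so the in-kernel struct ib_wc (which holds a QP pointer) cannot
         * simply be copied wholesale.
         */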
        wc->queue[head].wr_id = entry->wr_id;
        wc->queue[head].status = entry->status;
        wc->queue[head].opcode = entry->opcode;
        wc->queue[head].vendor_err = entry->vendor_err;
        wc->queue[head].byte_len = entry->byte_len;
        wc->queue[head].imm_data = (__u32 __force) entry->imm_data;
        wc->queue[head].qp_num = entry->qp->qp_num;
        wc->queue[head].src_qp = entry->src_qp;
        wc->queue[head].wc_flags = entry->wc_flags;
        wc->queue[head].pkey_index = entry->pkey_index;
        wc->queue[head].slid = entry->slid;
        wc->queue[head].sl = entry->sl;
        wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
        wc->queue[head].port_num = entry->port_num;
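        /* Publish the entry by advancing the head only after it is filled in. */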
        wc->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                tasklet_hi_schedule(&cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

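        /* Count completions with an error status in the device statistics. */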
        if (entry->status != IB_WC_SUCCESS)
                to_idev(cq->ibcq.device)->n_wqe_errs++;
}

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *wc;
        unsigned long flags;
        int npolled;
        u32 tail;

        spin_lock_irqsave(&cq->lock, flags);

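        /*
         * The tail index lives in the shared, user-writable page; clamp it
         * to a sane value before using it to index the queue.
         */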
        wc = cq->queue;
        tail = wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                struct ipath_qp *qp;

                if (tail == wc->head)
                        break;

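                /*
                 * ipath_lookup_qpn() takes a reference on the QP; drop it
                 * once the ibqp pointer has been recorded in the entry.
                 */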
                qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
                                      wc->queue[tail].qp_num);
                entry->qp = &qp->ibqp;
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);

                entry->wr_id = wc->queue[tail].wr_id;
                entry->status = wc->queue[tail].status;
                entry->opcode = wc->queue[tail].opcode;
                entry->vendor_err = wc->queue[tail].vendor_err;
                entry->byte_len = wc->queue[tail].byte_len;
                entry->imm_data = wc->queue[tail].imm_data;
                entry->src_qp = wc->queue[tail].src_qp;
                entry->wc_flags = wc->queue[tail].wc_flags;
                entry->pkey_index = wc->queue[tail].pkey_index;
                entry->slid = wc->queue[tail].slid;
                entry->sl = wc->queue[tail].sl;
                entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
                entry->port_num = wc->queue[tail].port_num;
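                /* Advance the tail, wrapping at the end of the circular buffer. */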
                if (tail >= cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        wc->tail = tail;

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}

static void send_complete(unsigned long data)
{
        struct ipath_cq *cq = (struct ipath_cq *)data;

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, tasklet_hi_schedule()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

                if (cq->triggered == triggered)
                        return;
        }
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: unused by the InfiniPath driver
 * @udata: unused by the InfiniPath driver
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_cq *cq;
        struct ipath_cq_wc *wc;
        struct ib_cq *ret;

        if (entries < 1 || entries > ib_ipath_max_cqes) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        /* Allocate the completion queue structure. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
        wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
        if (!wc) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_cq;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                int err;
                u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;

                cq->ip = ipath_create_mmap_info(dev, s, context, wc);
                if (!cq->ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wc;
                }

                err = ib_copy_to_udata(udata, &cq->ip->offset,
                                       sizeof(cq->ip->offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_ip;
                }
        } else
                cq->ip = NULL;

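        /* Enforce the per-device limit on the number of CQs. */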
        spin_lock(&dev->n_cqs_lock);
        if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                spin_unlock(&dev->n_cqs_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_cqs_allocated++;
        spin_unlock(&dev->n_cqs_lock);

        if (cq->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
        wc->head = 0;
        wc->tail = 0;
        cq->queue = wc;

        ret = &cq->ibcq;

        goto done;

bail_ip:
        kfree(cq->ip);
bail_wc:
        vfree(wc);
bail_cq:
        kfree(cq);
done:
        return ret;
}

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
        struct ipath_ibdev *dev = to_idev(ibcq->device);
        struct ipath_cq *cq = to_icq(ibcq);

        tasklet_kill(&cq->comptask);
        spin_lock(&dev->n_cqs_lock);
        dev->n_cqs_allocated--;
        spin_unlock(&dev->n_cqs_lock);
        if (cq->ip)
                kref_put(&cq->ip->ref, ipath_release_mmap_info);
        else
                vfree(cq->queue);
        kfree(cq);

        return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success, or 1 if IB_CQ_REPORT_MISSED_EVENTS was
 * requested and completion entries are pending.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

        if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
            cq->queue->head != cq->queue->tail)
                ret = 1;

        spin_unlock_irqrestore(&cq->lock, flags);

        return ret;
}

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new size of the completion queue
 * @udata: if non-NULL, the new mmap offset is returned to user space here
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *old_wc;
        struct ipath_cq_wc *wc;
        u32 head, tail, n;
        int ret;

        if (cqe < 1 || cqe > ib_ipath_max_cqes) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = (__u64) wc;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail;
        }

        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        old_wc = cq->queue;
        head = old_wc->head;
        if (head > (u32) cq->ibcq.cqe)
                head = (u32) cq->ibcq.cqe;
        tail = old_wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
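        /*
         * Count the entries currently queued, allowing for wrap-around;
         * the resize fails if they would not fit in the new buffer.
         */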
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                spin_unlock_irq(&cq->lock);
                vfree(wc);
                ret = -EOVERFLOW;
                goto bail;
        }
        for (n = 0; tail != head; n++) {
                wc->queue[n] = old_wc->queue[tail];
                if (tail == (u32) cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        wc->head = n;
        wc->tail = 0;
        cq->queue = wc;

        spin_unlock_irq(&cq->lock);

        vfree(old_wc);

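        /*
         * If the CQ is mmap'ed into user space, update the mmap info so
         * a subsequent mmap() by the owning process sees the new buffer.
         */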
        if (cq->ip) {
                struct ipath_ibdev *dev = to_idev(ibcq->device);
                struct ipath_mmap_info *ip = cq->ip;
                u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;

                ipath_update_mmap_info(dev, ip, s, wc);
                spin_lock_irq(&dev->pending_lock);
                if (list_empty(&ip->pending_mmaps))
                        list_add(&ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = 0;

bail:
        return ret;
}