/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Completion queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>

#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"
static struct kmem_cache *cq_cache;
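/*
 * Each CQ keeps a small hash table mapping real QP numbers to the QPs
 * that post completions to it, so the event/tasklet path can find the
 * right QP for an incoming CQE without scanning all QPs. The table is
 * protected by cq->spinlock.
 */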
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
	unsigned int qp_num = qp->real_qp_num;
	unsigned int key = qp_num & (QP_HASHTAB_LEN - 1);
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
	spin_unlock_irqrestore(&cq->spinlock, flags);

	ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
		 cq->cq_number, qp_num);

	return 0;
}
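/*
 * Remove a QP from the CQ's hash table; returns -EINVAL if the QP
 * was not registered with this CQ.
 */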
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
	int ret = -EINVAL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN - 1);
	struct hlist_node *iter;
	struct ehca_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);
	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			hlist_del(iter);
			ehca_dbg(cq->ib_cq.device,
				 "removed qp from cq .cq_num=%x real_qp_num=%x",
				 cq->cq_number, real_qp_num);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&cq->spinlock, flags);
	if (ret)
		ehca_err(cq->ib_cq.device,
			 "qp not found cq_num=%x real_qp_num=%x",
			 cq->cq_number, real_qp_num);

	return ret;
}
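/*
 * Look up a QP by its real QP number; returns NULL if no matching QP
 * is registered with this CQ.
 */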
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
	struct ehca_qp *ret = NULL;
	unsigned int key = real_qp_num & (QP_HASHTAB_LEN - 1);
	struct hlist_node *iter;
	struct ehca_qp *qp;

	hlist_for_each(iter, &cq->qp_hashtab[key]) {
		qp = hlist_entry(iter, struct ehca_qp, list_entries);
		if (qp->real_qp_num == real_qp_num) {
			ret = qp;
			break;
		}
	}

	return ret;
}
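/*
 * Create a completion queue: allocate the ehca_cq, reserve an idr token
 * (used by the interrupt handlers to find the CQ), allocate the CQ
 * resource via hypervisor call, register the queue pages with the
 * firmware and, for userspace CQs, copy the queue parameters back
 * through udata.
 */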
struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
			     struct ib_ucontext *context,
			     struct ib_udata *udata)
{
	static const u32 additional_cqe = 20;
	struct ib_cq *cq;
	struct ehca_cq *my_cq;
	struct ehca_shca *shca =
		container_of(device, struct ehca_shca, ib_device);
	struct ipz_adapter_handle adapter_handle;
	struct ehca_alloc_cq_parms param; /* h_call's out parameters */
	struct h_galpa gal;
	void *vpage;
	u32 counter;
	u64 rpage, cqx_fec, h_ret;
	int ipz_rc, i;
	unsigned long flags;

	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
		ehca_err(device, "Unable to create CQ, max number of %i "
			 "CQs reached.", shca->max_num_cqs);
		ehca_err(device, "To increase the maximum number of CQs "
			 "use the number_of_cqs module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
	if (!my_cq) {
		ehca_err(device, "Out of memory for ehca_cq struct device=%p",
			 device);
		atomic_dec(&shca->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));

	spin_lock_init(&my_cq->spinlock);
	spin_lock_init(&my_cq->cb_lock);
	spin_lock_init(&my_cq->task_lock);
	atomic_set(&my_cq->nr_events, 0);
	init_waitqueue_head(&my_cq->wait_completion);

	cq = &my_cq->ib_cq;

	adapter_handle = shca->ipz_hca_handle;
	param.eq_handle = shca->eq.ipz_eq_handle;

	idr_preload(GFP_KERNEL);
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
	idr_preload_end();

	if (my_cq->token < 0) {
		cq = ERR_PTR(-ENOMEM);
		ehca_err(device, "Can't allocate new idr entry. device=%p",
			 device);
		goto create_cq_exit1;
	}

	/*
	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
	 * for receiving errors CQEs.
	 */
	param.nr_cqe = cqe + additional_cqe;
	h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);

	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
			 "h_ret=%lli device=%p", h_ret, device);
		cq = ERR_PTR(ehca2ib_return_code(h_ret));
		goto create_cq_exit2;
	}

	ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
				EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
	if (!ipz_rc) {
		ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
			 ipz_rc, device);
		cq = ERR_PTR(-EINVAL);
		goto create_cq_exit3;
	}

	for (counter = 0; counter < param.act_pages; counter++) {
		vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
		if (!vpage) {
			ehca_err(device, "ipz_qpageit_get_inc() "
				 "returns NULL device=%p", device);
			cq = ERR_PTR(-EAGAIN);
			goto create_cq_exit4;
		}
		rpage = __pa(vpage);

		h_ret = hipz_h_register_rpage_cq(adapter_handle,
						 my_cq->ipz_cq_handle,
						 &my_cq->pf,
						 0,
						 0,
						 rpage,
						 1,
						 my_cq->galpas.kernel);

		if (h_ret < H_SUCCESS) {
			ehca_err(device, "hipz_h_register_rpage_cq() failed "
				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
				 "act_pages=%i", my_cq, my_cq->cq_number,
				 h_ret, counter, param.act_pages);
			cq = ERR_PTR(-EINVAL);
			goto create_cq_exit4;
		}

		if (counter == (param.act_pages - 1)) {
			vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
			if ((h_ret != H_SUCCESS) || vpage) {
				ehca_err(device, "Registration of pages not "
					 "complete ehca_cq=%p cq_num=%x "
					 "h_ret=%lli", my_cq, my_cq->cq_number,
					 h_ret);
				cq = ERR_PTR(-EAGAIN);
				goto create_cq_exit4;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(device, "Registration of page failed "
					 "ehca_cq=%p cq_num=%x h_ret=%lli "
					 "counter=%i act_pages=%i",
					 my_cq, my_cq->cq_number,
					 h_ret, counter, param.act_pages);
				cq = ERR_PTR(-ENOMEM);
				goto create_cq_exit4;
			}
		}
	}

	ipz_qeit_reset(&my_cq->ipz_queue);

	gal = my_cq->galpas.kernel;
	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
		 my_cq, my_cq->cq_number, cqx_fec);

	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
		param.act_nr_of_entries - additional_cqe;
	my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;

	for (i = 0; i < QP_HASHTAB_LEN; i++)
		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);

	INIT_LIST_HEAD(&my_cq->sqp_err_list);
	INIT_LIST_HEAD(&my_cq->rqp_err_list);

	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
		resp.ipz_queue.qe_size = ipz_queue->qe_size;
		resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		resp.fw_handle_ofs = (u32)
			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			cq = ERR_PTR(-EFAULT);
			goto create_cq_exit4;
		}
	}

	return cq;

create_cq_exit4:
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);

create_cq_exit3:
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
	if (h_ret != H_SUCCESS)
		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);

create_cq_exit2:
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

create_cq_exit1:
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);

	return cq;
}
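/*
 * Destroy a CQ: drop it from the idr first so no new interrupt tasklets
 * can find it, wait for pending events to drain, then release the
 * hypervisor resource and the queue memory.
 */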
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
	struct ehca_shca *shca = container_of(device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
	}

	/*
	 * remove the CQ from the idr first to make sure
	 * no more interrupt tasklets will touch this CQ
	 */
	write_lock_irqsave(&ehca_cq_idr_lock, flags);
	idr_remove(&ehca_cq_idr, my_cq->token);
	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	/* now wait until all pending events have completed */
	wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));

	/* nobody's using our CQ any longer -- we can destroy it */
	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
			 "state. Try to delete it forcibly.",
			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
		h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
		if (h_ret == H_SUCCESS)
			ehca_dbg(device, "cq_num=%x deleted successfully.",
				 cq_num);
	}
	if (h_ret != H_SUCCESS) {
		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
		return ehca2ib_return_code(h_ret);
	}
	ipz_queue_dtor(NULL, &my_cq->ipz_queue);
	kmem_cache_free(cq_cache, my_cq);

	atomic_dec(&shca->num_cqs);

	return 0;
}
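/* CQ resize is not supported by this driver */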
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	/* TODO: proper resize needs to be done */
	ehca_err(cq->device, "not implemented yet");

	return -EFAULT;
}
int ehca_init_cq_cache(void)
{
	cq_cache = kmem_cache_create("ehca_cache_cq",
				     sizeof(struct ehca_cq), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (cq_cache == NULL)
		return -ENOMEM;
	return 0;
}
void ehca_cleanup_cq_cache(void)
{
	if (cq_cache)
		kmem_cache_destroy(cq_cache);
}