/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                           struct ib_recv_wr **bad_wr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        struct ipath_ibdev *dev = to_idev(ibsrq->device);
        unsigned long flags;
        int ret;

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i, j;

                /* A WR with more SGEs than the queue supports can never fit. */
                if (wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                next = srq->rq.head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == srq->rq.tail) {
                        /* Queue is full. */
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&srq->rq, srq->rq.head);
                wqe->wr_id = wr->wr_id;
                wqe->sg_list[0].mr = NULL;
                wqe->sg_list[0].vaddr = NULL;
                wqe->sg_list[0].length = 0;
                wqe->sg_list[0].sge_length = 0;
                wqe->length = 0;
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        /* Check LKEY */
                        if (to_ipd(srq->ibsrq.pd)->user &&
                            wr->sg_list[i].lkey == 0) {
                                spin_unlock_irqrestore(&srq->rq.lock,
                                                       flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        if (wr->sg_list[i].length == 0)
                                continue;
                        if (!ipath_lkey_ok(&dev->lk_table,
                                           &wqe->sg_list[j],
                                           &wr->sg_list[i],
                                           IB_ACCESS_LOCAL_WRITE)) {
                                spin_unlock_irqrestore(&srq->rq.lock,
                                                       flags);
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto bail;
                        }
                        wqe->length += wr->sg_list[i].length;
                        j++;
                }
                wqe->num_sge = j;
                srq->rq.head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: not used by the InfiniPath verbs driver
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
                                struct ib_srq_init_attr *srq_init_attr,
                                struct ib_udata *udata)
{
        struct ipath_srq *srq;
        u32 sz;
        struct ib_srq *ret;

        if (srq_init_attr->attr.max_sge < 1) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        srq = kmalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        srq->rq.size = srq_init_attr->attr.max_wr + 1;
        sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge +
                sizeof(struct ipath_rwqe);
        srq->rq.wq = vmalloc(srq->rq.size * sz);
        if (!srq->rq.wq) {
                kfree(srq);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        /*
         * ib_create_srq() will initialize srq->ibsrq.
         */
        spin_lock_init(&srq->rq.lock);
        srq->rq.head = 0;
        srq->rq.tail = 0;
        srq->rq.max_sge = srq_init_attr->attr.max_sge;
        srq->limit = srq_init_attr->attr.srq_limit;

        ret = &srq->ibsrq;

bail:
        return ret;
}
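/*
 * The receive queue is a single vmalloc()ed array of variable-size
 * entries: each WQE is a struct ipath_rwqe header followed immediately
 * by max_sge struct ipath_sge slots, which is why get_rwqe_ptr() must
 * scale the index by the per-entry size rather than using plain array
 * indexing.  A minimal sketch of the arithmetic, assuming the helper
 * takes the usual inline form:
 *
 *      wqe = (struct ipath_rwqe *)((char *)rq->wq + n * sz);
 *
 * where sz is the sizeof(struct ipath_rwqe) + max_sge *
 * sizeof(struct ipath_sge) value computed above.
 */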
/**
 * ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask)
{
        struct ipath_srq *srq = to_isrq(ibsrq);
        unsigned long flags;
        int ret;

        if (attr_mask & IB_SRQ_LIMIT) {
                spin_lock_irqsave(&srq->rq.lock, flags);
                srq->limit = attr->srq_limit;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        if (attr_mask & IB_SRQ_MAX_WR) {
                u32 size = attr->max_wr + 1;
                struct ipath_rwqe *wq, *p;
                u32 n;
                u32 sz;

                /* We cannot shrink the number of SGEs per WQE. */
                if (attr->max_sge < srq->rq.max_sge) {
                        ret = -EINVAL;
                        goto bail;
                }

                sz = sizeof(struct ipath_rwqe) +
                        attr->max_sge * sizeof(struct ipath_sge);
                wq = vmalloc(size * sz);
                if (!wq) {
                        ret = -ENOMEM;
                        goto bail;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                if (srq->rq.head < srq->rq.tail)
                        n = srq->rq.size + srq->rq.head - srq->rq.tail;
                else
                        n = srq->rq.head - srq->rq.tail;
                if (size <= n || size <= srq->limit) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        vfree(wq);
                        ret = -EINVAL;
                        goto bail;
                }
                n = 0;
                p = wq;
                while (srq->rq.tail != srq->rq.head) {
                        struct ipath_rwqe *wqe;
                        int i;

                        wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail);
                        p->wr_id = wqe->wr_id;
                        p->length = wqe->length;
                        p->num_sge = wqe->num_sge;
                        for (i = 0; i < wqe->num_sge; i++)
                                p->sg_list[i] = wqe->sg_list[i];
                        n++;
                        p = (struct ipath_rwqe *)((char *) p + sz);
                        if (++srq->rq.tail >= srq->rq.size)
                                srq->rq.tail = 0;
                }
                vfree(srq->rq.wq);
                srq->rq.wq = wq;
                srq->rq.size = size;
                srq->rq.head = n;
                srq->rq.tail = 0;
                srq->rq.max_sge = attr->max_sge;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
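/*
 * Resizing compacts the outstanding WQEs: they are copied from the old
 * ring in FIFO order starting at slot 0 of the new ring, so afterwards
 * tail = 0 and head = n (the number of WQEs preserved).  The
 * "size <= n || size <= srq->limit" check rejects a new ring too small
 * to hold the currently posted WQEs or the configured SRQ limit.
 */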
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        /* One ring slot is always left empty, hence the - 1. */
        attr->max_wr = srq->rq.size - 1;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;

        return 0;
}
/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
        struct ipath_srq *srq = to_isrq(ibsrq);

        vfree(srq->rq.wq);
        kfree(srq);

        return 0;
}
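/*
 * These entry points are registered with the core verbs layer from
 * ipath_verbs.c; in drivers of this generation the struct ib_device
 * carried function pointers directly, along the lines of (sketch, not
 * the verbatim registration code):
 *
 *      dev->post_srq_recv = ipath_post_srq_receive;
 *      dev->create_srq = ipath_create_srq;
 *      dev->modify_srq = ipath_modify_srq;
 *      ...
 */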