/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        wmb(); /* Memory barrier to ensure data is written before valid bit is set */

        set_64bit_val(wqe, 24, header);
        return 0;
}
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        mb(); /* valid bit is written and loads completed before reading shadow */

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
        sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (sw_sq_head != hw_sq_tail) {
                if (sw_sq_head > qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) &&
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                } else if (sw_sq_head != qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) ||
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}
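/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the doorbell is rung only when the hardware tail still lies inside the
 * span of work posted since the last i40iw_qp_post_wr() call.  For
 * example, with initial_ring.head = 10 and sw_sq_head = 14, a hw_sq_tail
 * of 12 falls inside [10, 14) and triggers the writel(); a hw_sq_tail of
 * 14 means the hardware has already consumed everything posted, so no
 * doorbell is needed.  The second branch covers the case where
 * sw_sq_head has wrapped past the end of the ring since the last post.
 */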
/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 * @total_size: total size of the work request in bytes
 * @wr_id: work request id
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
                                u32 *wqe_idx,
                                u8 wqe_size,
                                u32 total_size,
                                u64 wr_id)
{
        u64 *wqe = NULL;
        u64 wqe_ptr;
        u32 peek_head = 0;
        u16 offset;
        enum i40iw_status_code ret_code = 0;
        u8 nop_wqe_cnt = 0, i;
        u64 *wqe_0 = NULL;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
        offset = (u16)(wqe_ptr) & 0x7F;
        if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
                nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
                for (i = 0; i < nop_wqe_cnt; i++) {
                        i40iw_nop_1(qp);
                        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                        if (ret_code)
                                return NULL;
                }

                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
                i40iw_nop_1(qp);
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }
        I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
                                      wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
        if (ret_code)
                return NULL;

        wqe = qp->sq_base[*wqe_idx].elem;

        peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe_0 = qp->sq_base[peek_head].elem;

        if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
                if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
                        wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        }

        qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
        return wqe;
}
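/*
 * Illustrative note (editor's sketch, not part of the original source):
 * SQ WQEs are sized in 32-byte quanta and must not straddle a 128-byte
 * boundary.  If the current slot sits at offset 96 within a 128-byte
 * block and a 64-byte WQE is requested, 96 + 64 exceeds
 * I40IW_QP_WQE_MAX_SIZE (128), so (128 - 96) / 32 = 1 nop WQE is posted
 * first to push the real WQE to the next aligned position.  A 64-byte
 * WQE that would start on an odd quantum index (*wqe_idx & 3 == 1) is
 * padded the same way.
 */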
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
        if (sge) {
                set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
                set_64bit_val(wqe, (offset + 8),
                              (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
                               LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
        }
}
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of qwords in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

        return wqe;
}
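/*
 * Illustrative note (editor's sketch, not part of the original source):
 * with rq_wqe_size_multiplier = 16 (a 128-byte RQ WQE, i.e. 16 qwords),
 * each logical RQ index advances by 16 >> 2 = 4 slots of the 32-byte
 * rq_base element array, so *wqe_idx = 2 resolves to qp->rq_base[8].elem.
 */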
/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
                                               struct i40iw_post_sq_info *info,
                                               bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
                return I40IW_ERR_QP_INVALID_MSG_SIZE;

        read_fence |= info->read_fence;

        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        if (!op_info->rem_addr.stag)
                return I40IW_ERR_BAD_STAG;

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
                                              struct i40iw_post_sq_info *info,
                                              bool inv_stag,
                                              bool post_sq)
{
        u64 *wqe;
        struct i40iw_rdma_read *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        u8 wqe_size;
        bool local_fence = false;

        op_info = &info->op.rdma_read;
        ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
        if (ret_code)
                return ret_code;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        local_fence |= info->local_fence;

        set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
                                         struct i40iw_post_sq_info *info,
                                         u32 stag_to_inv,
                                         bool post_sq)
{
        u64 *wqe;
        struct i40iw_post_send *op_info;
        u64 header;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;
        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16, 0);
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
                                                      struct i40iw_post_sq_info *info,
                                                      bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_inline_rdma_write *op_info;
        u64 header = 0;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                memcpy(dest, src, op_info->len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = (u8 *)wqe + 32;
                memcpy(dest, src, op_info->len - 16);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
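/*
 * Illustrative note (editor's sketch, not part of the original source):
 * inline payload layout inside the WQE.  The first 16 bytes of user data
 * land at WQE offset 0, any remaining bytes continue at offset 32, and
 * the header qword at offset 24 is written last, after the wmb(), so the
 * valid bit only becomes visible once the payload is in place.  A
 * 40-byte inline write therefore copies 16 bytes to offset 0 and the
 * remaining 24 bytes to offset 32 within a 64-byte WQE
 * (i40iw_inline_data_size_to_wqesize() returns 64 for lengths > 16).
 */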
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
                                                struct i40iw_post_sq_info *info,
                                                u32 stag_to_inv,
                                                bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_post_inline_send *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                memcpy(dest, src, op_info->len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = (u8 *)wqe + 32;
                memcpy(dest, src, op_info->len - 16);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
                                                          struct i40iw_post_sq_info *info,
                                                          bool post_sq)
{
        u64 *wqe;
        struct i40iw_inv_local_stag *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
        set_64bit_val(wqe, 16, 0);
        header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
                                            struct i40iw_post_sq_info *info,
                                            bool post_sq)
{
        u64 *wqe;
        struct i40iw_bind_window *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.bind_window;

        local_fence |= info->local_fence;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
                      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
        set_64bit_val(wqe, 16, op_info->bind_length);
        header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
                 LS_64(((op_info->enable_reads << 2) |
                        (op_info->enable_writes << 3)),
                       I40IWQPSQ_STAGRIGHTS) |
                 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
                       I40IWQPSQ_VABASEDTO) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
                                                 struct i40iw_post_rq_info *info)
{
        u64 *wqe;
        u64 header;
        u32 total_size = 0, wqe_idx, i, byte_off;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        for (i = 0; i < info->num_sges; i++)
                total_size += info->sg_list[i].len;
        wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        set_64bit_val(wqe, 16, 0);

        header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, info->sg_list);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        return 0;
}
/**
 * i40iw_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
                                          enum i40iw_completion_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
        arm_next_se |= 1;
        if (cq_notify == IW_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        wmb(); /* make sure WQE is populated before valid bit is set */

        writel(cq->cq_id, cq->cqe_alloc_reg);
}
/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
                                                    u8 count)
{
        I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
        set_64bit_val(cq->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        return 0;
}
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
                                                       struct i40iw_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
        struct i40iw_qp_uk *qp;
        struct i40iw_ring *pring = NULL;
        u32 wqe_idx, q_type, array_idx = 0;
        enum i40iw_status_code ret_code = 0;
        bool move_cq_head = true;
        u8 polarity;
        u8 addl_wqes = 0;

        if (cq->avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

        if (polarity != cq->polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        if (info->error) {
                info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
                info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
                info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
        } else {
                info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);

        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
        info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

        qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        if (!qp) {
                ret_code = I40IW_ERR_QUEUE_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
        info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

        if (q_type == I40IW_CQE_QTYPE_RQ) {
                array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
                if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->op_type = I40IW_OP_TYPE_REC;
                if (qword3 & I40IWCQ_STAG_MASK) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
                } else {
                        info->stag_invalid_set = false;
                }
                info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
                I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                pring = &qp->rq_ring;
        } else {
                if (qp->first_sq_wq) {
                        qp->first_sq_wq = false;
                        if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
                                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                                set_64bit_val(cq->shadow_area, 0,
                                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
                                memset(info, 0, sizeof(struct i40iw_cq_poll_info));
                                return i40iw_cq_poll_completion(cq, info);
                        }
                }

                if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

                        info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
                        sw_wqe = qp->sq_base[wqe_idx].elem;
                        get_64bit_val(sw_wqe, 24, &wqe_qword);

                        addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                        I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
                } else {
                        do {
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24, &wqe_qword);
                                op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
                                info->op_type = op_type;
                                addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                                I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
                                if (op_type != I40IWQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code &&
            (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
                if (pring && (I40IW_RING_MORE_WORK(*pring)))
                        move_cq_head = false;

        if (move_cq_head) {
                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);

                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;

                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        } else {
                if (info->is_srq)
                        return ret_code;
                qword3 &= ~I40IW_CQ_WQEIDX_MASK;
                qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}
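/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * a consumer typically drains the CQ through the ops table until the
 * queue reports empty, e.g.
 *
 *      struct i40iw_cq_poll_info info;
 *
 *      while (!cq->ops.iw_cq_poll_completion(cq, &info))
 *              handle_completion(&info);       // hypothetical callback
 *
 * where a non-zero return (such as I40IW_ERR_QUEUE_EMPTY) ends the loop.
 */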
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
        *shift = 0;
        if (sge > 1 || inline_data > 16)
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
}
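/*
 * Illustrative note (editor's sketch, not part of the original source):
 * a few sample inputs and the resulting shift/WQE size:
 *   sge = 1, inline_data = 0   -> shift 0, 32-byte WQE
 *   sge = 3, inline_data = 16  -> shift 1, 64-byte WQE
 *   sge = 2, inline_data = 48  -> shift 1, 64-byte WQE
 *   sge = 4, inline_data = 0   -> shift 2, 128-byte WQE
 */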
/**
 * i40iw_get_sqdepth - get SQ depth (quantas)
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
        *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);

        if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
                *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
        else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
                return I40IW_ERR_INVALID_SIZE;

        return 0;
}
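/*
 * Illustrative note (editor's sketch, not part of the original source):
 * with sq_size = 64 and shift = 1 the raw quanta count is
 * (64 << 1) + I40IW_SQ_RSVD; assuming the reserve is a small positive
 * constant, roundup_pow_of_two() takes 128 + reserve up to 256, which is
 * then range-checked against the driver minimum and maximum.
 */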
/**
 * i40iw_get_rqdepth - get RQ depth (quantas)
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
        *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);

        if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
                *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
        else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
                return I40IW_ERR_INVALID_SIZE;

        return 0;
}
static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
        .iw_qp_post_wr = i40iw_qp_post_wr,
        .iw_rdma_write = i40iw_rdma_write,
        .iw_rdma_read = i40iw_rdma_read,
        .iw_send = i40iw_send,
        .iw_inline_rdma_write = i40iw_inline_rdma_write,
        .iw_inline_send = i40iw_inline_send,
        .iw_stag_local_invalidate = i40iw_stag_local_invalidate,
        .iw_mw_bind = i40iw_mw_bind,
        .iw_post_receive = i40iw_post_receive,
        .iw_post_nop = i40iw_nop
};

static const struct i40iw_cq_ops iw_cq_ops = {
        .iw_cq_request_notification = i40iw_cq_request_notification,
        .iw_cq_poll_completion = i40iw_cq_poll_completion,
        .iw_cq_post_entries = i40iw_cq_post_entries,
        .iw_cq_clean = i40iw_clean_cq
};

static const struct i40iw_device_uk_ops iw_device_uk_ops = {
        .iwarp_cq_uk_init = i40iw_cq_uk_init,
        .iwarp_qp_uk_init = i40iw_qp_uk_init,
};
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on numbers of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
                                        struct i40iw_qp_uk_init_info *info)
{
        enum i40iw_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);

        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;

        qp->wqe_alloc_reg = info->wqe_alloc_reg;
        qp->qp_id = info->qp_id;
        qp->sq_size = info->sq_size;
        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;

        I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
        I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        I40IW_RING_MOVE_TAIL(qp->sq_ring);
        I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
        qp->swqe_polarity = 1;
        qp->first_sq_wq = true;
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;

        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
                switch (info->abi_ver) {
                case 4:
                        i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
                        break;
                case 5: /* fallthrough until next ABI version */
                default:
                        rqshift = I40IW_MAX_RQ_WQE_SHIFT;
                        break;
                }
                qp->rq_wqe_size = rqshift;
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;

        return ret_code;
}
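/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the SQ ring is sized in 32-byte quanta, not in WQEs.  For example, a
 * caller requesting sq_size = 128 with max_sq_frag_cnt = 3 and inline
 * data capped at 48 bytes gets sqshift = 1 from i40iw_get_wqe_shift(),
 * so sq_ring_size = 128 << 1 = 256 quanta, i.e. room for 128 WQEs of
 * 64 bytes each.
 */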
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
                                        struct i40iw_cq_uk_init_info *info)
{
        if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
            (info->cq_size > I40IW_MAX_CQ_SIZE))
                return I40IW_ERR_INVALID_SIZE;
        cq->cq_base = (struct i40iw_cqe *)info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_reg = info->cqe_alloc_reg;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;

        I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
        cq->ops = iw_cq_ops;

        return 0;
}
/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
        dev->ops_uk = iw_device_uk_ops;
}
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
        u64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
                else
                        cqe = (u64 *)&cq->cq_base[cq_head];
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == queue)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}
/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
                                 u64 wr_id,
                                 bool signaled,
                                 bool post_sq)
{
        u64 header, *wqe;
        u32 wqe_idx;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
                *wqe_size = 96;
                break;
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
{
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
        else
                *wqe_size = 64;

        return 0;
}