/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_d.h"
#include "i40iw_user.h"
#include "i40iw_register.h"
static u32 nop_signature = 0x55550000;
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        wmb(); /* Memory barrier to ensure data is written before valid bit is set */

        set_64bit_val(wqe, 24, header);
        return 0;
}
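/*
 * Note: i40iw_nop_1() is the padding helper used by
 * i40iw_qp_get_next_send_wqe() below.  It fills a 32-byte SQ quantum with a
 * NOP (carrying the incrementing nop_signature in its header) when a
 * requested WQE would otherwise straddle a 128-byte boundary, and it
 * deliberately does not ring the doorbell -- posting is left to the real
 * WQE that follows.
 */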
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        mb(); /* valid bit is written and loads completed before reading shadow */

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
        sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (sw_sq_head != hw_sq_tail) {
                if (sw_sq_head > qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) &&
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                } else if (sw_sq_head != qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) ||
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}
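/*
 * Note: the shadow area reports the SQ tail the hardware has consumed
 * (hw_sq_tail).  The nested comparisons above ring the doorbell only when
 * the software head has advanced past work the hardware has not yet fetched,
 * covering both the non-wrapped case (sw_sq_head > initial_ring.head) and
 * the wrapped case; initial_ring.head is then resynchronized to the current
 * software head.
 */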
/**
 * i40iw_qp_ring_push_db -  ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
        qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}
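/*
 * Note: this is the push-mode doorbell.  The inline post routines below copy
 * the finished WQE into the push page (qp->push_wqe) and then write the
 * descriptor index and qp id here, instead of going through the regular
 * wqe_alloc_reg doorbell in i40iw_qp_post_wr().
 */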
/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
                                u32 *wqe_idx,
                                u8 wqe_size,
                                u32 total_size,
                                u64 wr_id)
{
        u64 *wqe = NULL;
        u64 wqe_ptr;
        u32 peek_head = 0;
        u16 offset;
        enum i40iw_status_code ret_code = 0;
        u8 nop_wqe_cnt = 0, i;
        u64 *wqe_0 = NULL;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
        offset = (u16)(wqe_ptr) & 0x7F;
        if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
                nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
                for (i = 0; i < nop_wqe_cnt; i++) {
                        i40iw_nop_1(qp);
                        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                        if (ret_code)
                                return NULL;
                }

                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
                i40iw_nop_1(qp);
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
        }

        wqe = qp->sq_base[*wqe_idx].elem;

        peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe_0 = qp->sq_base[peek_head].elem;

        if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
                if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
                        wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        }

        qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
        return wqe;
}
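/*
 * Usage sketch (illustrative summary, not additional code in the original):
 * every SQ verb below follows the same pattern --
 *
 *      wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size,
 *                                       total_size, info->wr_id);
 *      if (!wqe)
 *              return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
 *      ... fill qwords 0/8/16 and build the header ...
 *      wmb();                          // publish payload before valid bit
 *      set_64bit_val(wqe, 24, header); // header qword carries the valid bit
 *      if (post_sq)
 *              i40iw_qp_post_wr(qp);
 *
 * See i40iw_rdma_write() for the canonical sequence.
 */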
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
        if (sge) {
                set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
                set_64bit_val(wqe, (offset + 8),
                              (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
                               LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
        }
}
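/*
 * Note: each fragment occupies 16 bytes of the WQE (tag_off in one qword,
 * len and stag in the next), which is why the SQ and RQ post routines place
 * the second fragment at byte_off = 32 and advance byte_off by 16 per SGE.
 */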
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of qwords in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

        return wqe;
}
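/*
 * Note: rq_wqe_size_multiplier is set to (4 << rqshift) in i40iw_qp_uk_init(),
 * i.e. the number of 8-byte qwords per RQ WQE, so (multiplier >> 2) is the
 * number of 32-byte quanta per WQE and serves as the stride into rq_base[].
 */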
/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
                                               struct i40iw_post_sq_info *info,
                                               bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
                return I40IW_ERR_QP_INVALID_MSG_SIZE;

        read_fence |= info->read_fence;

        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        if (!op_info->rem_addr.stag)
                return I40IW_ERR_BAD_STAG;

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
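/*
 * Note: I40IWQPSQ_ADDFRAGCNT encodes the number of fragments beyond the one
 * embedded in the base WQE, hence the (num_lo_sges - 1) above; i40iw_send()
 * and i40iw_post_receive() use the same convention.
 */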
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
                                              struct i40iw_post_sq_info *info,
                                              bool inv_stag,
                                              bool post_sq)
{
        u64 *wqe;
        struct i40iw_rdma_read *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        u8 wqe_size;
        bool local_fence = false;

        op_info = &info->op.rdma_read;
        ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
        if (ret_code)
                return ret_code;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        local_fence |= info->local_fence;

        set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
                                         struct i40iw_post_sq_info *info,
                                         u32 stag_to_inv,
                                         bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_post_send *op_info;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;
        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16, 0);
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
                                                      struct i40iw_post_sq_info *info,
                                                      bool post_sq)
{
        u64 header = 0;
        u64 *wqe;
        u64 *push;
        struct i40iw_inline_rdma_write *op_info;
        u32 i, wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;
        u8 *dest, *src;

        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}
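/*
 * Note: for inline WQEs the first 16 data bytes share qwords 0-1 with the
 * WQE itself and any remainder continues at byte 32, skipping the header
 * qword at byte 24 -- that is why the copy loop above re-aims dest at
 * wqe + 32 after the first 16 bytes, and why the push-mode memcpy copies
 * op_info->len + 16 bytes for payloads larger than 16.
 */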
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
                                                struct i40iw_post_sq_info *info,
                                                u32 stag_to_inv,
                                                bool post_sq)
{
        u64 header;
        u64 *wqe;
        u64 *push;
        struct i40iw_post_inline_send *op_info;
        u32 i, wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;
        u8 *dest, *src;

        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                for (i = 0; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        } else {
                for (i = 0; i < 16; i++, src++, dest++)
                        *dest = *src;
                dest = (u8 *)wqe + 32;
                for (; i < op_info->len; i++, src++, dest++)
                        *dest = *src;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}
/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
                                                          struct i40iw_post_sq_info *info,
                                                          bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_inv_local_stag *op_info;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
        set_64bit_val(wqe, 16, 0);
        header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
                                            struct i40iw_post_sq_info *info,
                                            bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_bind_window *op_info;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.bind_window;

        local_fence |= info->local_fence;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
                      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
        set_64bit_val(wqe, 16, op_info->bind_length);
        header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
                 LS_64(((op_info->enable_reads << 2) |
                        (op_info->enable_writes << 3)),
                       I40IWQPSQ_STAGRIGHTS) |
                 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
                       I40IWQPSQ_VABASEDTO) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
                                                 struct i40iw_post_rq_info *info)
{
        u64 header;
        u64 *wqe;
        u32 total_size = 0, wqe_idx, i, byte_off;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        for (i = 0; i < info->num_sges; i++)
                total_size += info->sg_list[i].len;
        wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        set_64bit_val(wqe, 16, 0);

        header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, info->sg_list);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        return 0;
}
/**
 * i40iw_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
                                          enum i40iw_completion_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
        arm_next_se |= 1;
        if (cq_notify == IW_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        wmb(); /* make sure WQE is populated before valid bit is set */

        writel(cq->cq_id, cq->cqe_alloc_reg);
}
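/*
 * Note: arming is requested through the CQ doorbell shadow area -- the arm
 * sequence number and arm bits are packed into the qword at offset 32 --
 * after which the CQ id is written to the CQE allocate register so hardware
 * picks up the request.
 */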
/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
                                                    u8 count)
{
        I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
        set_64bit_val(cq->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        return 0;
}
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
                                                       struct i40iw_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
        struct i40iw_qp_uk *qp;
        struct i40iw_ring *pring = NULL;
        u32 wqe_idx, q_type, array_idx = 0;
        enum i40iw_status_code ret_code = 0;
        bool move_cq_head = true;
        u8 polarity;
        u8 addl_wqes = 0;

        if (cq->avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

        if (polarity != cq->polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
        if (info->error) {
                info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
                info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
                info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
        } else {
                info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
        info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

        qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        if (!qp) {
                ret_code = I40IW_ERR_QUEUE_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
        info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

        if (q_type == I40IW_CQE_QTYPE_RQ) {
                array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
                if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->op_type = I40IW_OP_TYPE_REC;
                if (qword3 & I40IWCQ_STAG_MASK) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
                } else {
                        info->stag_invalid_set = false;
                }
                info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
                I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                pring = &qp->rq_ring;
        } else {
                if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

                        info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
                        sw_wqe = qp->sq_base[wqe_idx].elem;
                        get_64bit_val(sw_wqe, 24, &wqe_qword);

                        addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                        I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
                } else {
                        do {
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24, &wqe_qword);
                                op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
                                info->op_type = op_type;
                                addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                                I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
                                if (op_type != I40IWQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code &&
            (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
                if (pring && (I40IW_RING_MORE_WORK(*pring)))
                        move_cq_head = false;

        if (move_cq_head) {
                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);

                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;

                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        } else {
                if (info->is_srq)
                        return ret_code;
                qword3 &= ~I40IW_CQ_WQEIDX_MASK;
                qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}
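/*
 * Note: for flushed completions the WQE index reported in the CQE is not
 * used directly -- RQ work requests are drained from rq_ring.tail and SQ
 * work requests are walked from sq_ring.tail, skipping NOP padding WQEs.
 * While the flushed ring still has outstanding work the CQ head is left in
 * place and the CQE's WQEIDX is rewritten to the ring tail so the next poll
 * resumes the flush from the right spot.
 */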
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
{
        u32 size;

        *shift = 0;
        if (sge > 1 || inline_data > 16)
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;

        /* wqdepth must be a power of 2 and at least the minimum size */
        if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
                return I40IW_ERR_INVALID_SIZE;

        size = wqdepth << *shift;       /* multiple of 32 bytes count */
        if (size > I40IWQP_SW_MAX_WQSIZE)
                return I40IW_ERR_INVALID_SIZE;

        return 0;
}
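/*
 * Worked example (illustrative): with sge = 2 and inline_data = 0 the shift
 * is 1, so a requested depth of 128 WQEs needs 128 << 1 = 256 ring quanta of
 * 32 bytes each -- i.e. the SQ ring is sized for 128 64-byte WQEs.
 */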
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
        i40iw_qp_post_wr,
        i40iw_qp_ring_push_db,
        i40iw_rdma_write,
        i40iw_rdma_read,
        i40iw_send,
        i40iw_inline_rdma_write,
        i40iw_inline_send,
        i40iw_stag_local_invalidate,
        i40iw_mw_bind,
        i40iw_post_receive,
        i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
        i40iw_cq_request_notification,
        i40iw_cq_poll_completion,
        i40iw_cq_post_entries,
        i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
        i40iw_cq_uk_init,
        i40iw_qp_uk_init,
};
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on numbers of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
                                        struct i40iw_qp_uk_init_info *info)
{
        enum i40iw_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
        if (ret_code)
                return ret_code;

        ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
        if (ret_code)
                return ret_code;

        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;

        qp->wqe_alloc_reg = info->wqe_alloc_reg;
        qp->qp_id = info->qp_id;

        qp->sq_size = info->sq_size;
        qp->push_db = info->push_db;
        qp->push_wqe = info->push_wqe;

        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;

        I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
        I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        I40IW_RING_MOVE_TAIL(qp->sq_ring);
        I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
        qp->swqe_polarity = 1;
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;

        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
                qp->rq_wqe_size = rqshift;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;

        return ret_code;
}
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
                                        struct i40iw_cq_uk_init_info *info)
{
        if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
            (info->cq_size > I40IW_MAX_CQ_SIZE))
                return I40IW_ERR_INVALID_SIZE;
        cq->cq_base = (struct i40iw_cqe *)info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_reg = info->cqe_alloc_reg;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;
        cq->polarity = 1;
        I40IW_RING_INIT(cq->cq_ring, cq->cq_size);

        cq->ops = iw_cq_ops;

        return 0;
}
/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
        dev->ops_uk = iw_device_uk_ops;
}
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
        u64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
                else
                        cqe = (u64 *)&cq->cq_base[cq_head];
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == queue)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}
/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
                                 u64 wr_id,
                                 bool signaled,
                                 bool post_sq)
{
        u64 header, *wqe;
        u32 wqe_idx;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
                *wqe_size = 96;
                break;
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
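/*
 * Note: the mapping above packs two additional fragments per extra 32-byte
 * quantum beyond the base WQE (which already holds one SGE): 0-1 fragments
 * fit in 32 bytes, 2-3 in 64, 4-5 in 96 and 6-7 in 128; larger counts are
 * rejected.
 */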
/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
{
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_IMM_DATA_SIZE;

        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
        else if (data_size <= 48)
                *wqe_size = 64;
        else if (data_size <= 80)