/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_user.h"
#include "i40iw_register.h"

static u32 nop_signature = 0x55550000;
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
        u64 header, *wqe;
        u64 *wqe_0 = NULL;
        u32 wqe_idx, peek_head;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return I40IW_ERR_PARAM;

        wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;

        peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
        wqe_0 = qp->sq_base[peek_head].elem;
        if (peek_head)
                wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        else
                wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

        wmb(); /* Memory barrier to ensure data is written before valid bit is set */

        set_64bit_val(wqe, 24, header);
        return 0;
}
/**
 * i40iw_qp_post_wr - post wr to hardware
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        mb(); /* valid bit is written and loads completed before reading shadow */

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
        sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (sw_sq_head != hw_sq_tail) {
                if (sw_sq_head > qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) &&
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                } else if (sw_sq_head != qp->initial_ring.head) {
                        if ((hw_sq_tail >= qp->initial_ring.head) ||
                            (hw_sq_tail < sw_sq_head)) {
                                writel(qp->qp_id, qp->wqe_alloc_reg);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}
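
/*
 * Illustrative example of the doorbell decision above (values are
 * hypothetical): with initial_ring.head = 2, a caller that queued WQEs up to
 * sw_sq_head = 5 while the shadow area reports hw_sq_tail = 3 takes the first
 * branch (5 > 2, and 2 <= 3 < 5), so qp_id is written to the doorbell
 * register. If hw_sq_tail had already advanced to 5, the hardware is caught
 * up and no doorbell write is needed.
 */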
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0,
                      LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
        qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}
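
/*
 * Sketch of the push doorbell value, assuming the field layout implied by the
 * macros above: the WQE index is divided by 4 (wqe_idx >> 2, i.e. one
 * 128-byte descriptor group per four 32-byte quanta) and placed in the
 * WQE_DESC_INDEX field, then OR'ed with qp_id in the low bits. For example,
 * wqe_idx = 6 with qp_id = 12 would program descriptor index 1 for QP 12.
 */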
/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
                                u32 *wqe_idx,
                                u8 wqe_size,
                                u32 total_size,
                                u64 wr_id)
{
        u64 *wqe = NULL;
        u64 *wqe_0 = NULL;
        u64 wqe_ptr;
        u32 peek_head = 0;
        u16 offset;
        enum i40iw_status_code ret_code = 0;
        u8 nop_wqe_cnt = 0, i;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
        offset = (u16)(wqe_ptr) & 0x7F;
        if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
                nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
                for (i = 0; i < nop_wqe_cnt; i++) {
                        i40iw_nop_1(qp);
                        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                        if (ret_code)
                                return NULL;
                }

                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }

        if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
                i40iw_nop_1(qp);
                I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
                if (ret_code)
                        return NULL;
                *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
                if (!*wqe_idx)
                        qp->swqe_polarity = !qp->swqe_polarity;
        }
        I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
                                      wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
        if (ret_code)
                return NULL;

        wqe = qp->sq_base[*wqe_idx].elem;

        peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        wqe_0 = qp->sq_base[peek_head].elem;

        if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
                if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
                        wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
        }

        qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
        return wqe;
}
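
/*
 * Worked example of the nop padding above (values are illustrative): a
 * 64-byte WQE whose current 32-byte quantum sits at offset 0x60 within the
 * 128-byte aligned region (offset + wqe_size = 0x60 + 0x40 > 0x80) would
 * cross the 128-byte boundary, so (0x80 - 0x60) / 0x20 = 1 nop WQE is posted
 * first and the real WQE starts at the next 128-byte boundary.
 */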
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
        set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
        set_64bit_val(wqe, (offset + 8),
                      (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
                       LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
}
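
/*
 * Resulting fragment layout, as written by i40iw_set_fragment(): the SGE's
 * tag_off (tag offset / virtual address) lands in the quadword at 'offset',
 * and its length plus STag are packed into the quadword at 'offset + 8'.
 * Callers place the first SGE at offset 0 and each additional SGE 16 bytes
 * further into the WQE.
 */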
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is number of qwords in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

        return wqe;
}
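
/*
 * Illustrative index math for the RQ lookup above (multiplier value is
 * hypothetical): with rq_wqe_size_multiplier = 8 (a 64-byte RQ WQE, i.e. two
 * 32-byte quanta), wqe index 3 maps to rq_base[3 * (8 >> 2)] = rq_base[6],
 * the first quantum of the fourth RQ WQE.
 */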
/**
 * i40iw_rdma_write - rdma write operation
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
                                               struct i40iw_post_sq_info *info,
                                               bool post_sq)
{
        u64 header;
        u64 *wqe;
        struct i40iw_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
                return I40IW_ERR_QP_INVALID_MSG_SIZE;

        read_fence |= info->read_fence;

        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        if (!op_info->rem_addr.stag)
                return I40IW_ERR_BAD_STAG;

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_rdma_read - rdma read command
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
                                              struct i40iw_post_sq_info *info,
                                              bool inv_stag,
                                              bool post_sq)
{
        u64 *wqe;
        struct i40iw_rdma_read *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        u8 wqe_size;
        bool local_fence = false;

        op_info = &info->op.rdma_read;
        ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
        if (ret_code)
                return ret_code;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        local_fence |= info->local_fence;

        set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_send - rdma send command
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
                                         struct i40iw_post_sq_info *info,
                                         u32 stag_to_inv,
                                         bool post_sq)
{
        u64 *wqe;
        struct i40iw_post_send *op_info;
        u64 header;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;
        ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16, 0);
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, op_info->sg_list);

        for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
                                                      struct i40iw_post_sq_info *info,
                                                      bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_inline_rdma_write *op_info;
        u64 *push;
        u64 header = 0;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;

        op_info = &info->op.inline_rdma_write;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

        header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
                 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                memcpy(dest, src, op_info->len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = (u8 *)wqe + 32;
                memcpy(dest, src, op_info->len - 16);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}
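
/*
 * Illustrative copy sizes for the inline/push path above (lengths are
 * hypothetical): a 10-byte payload fits entirely in the first 16-byte slot of
 * the WQE, and the push copy moves 32 bytes (one quantum). A 40-byte payload
 * is split: 16 bytes at the start of the WQE and 24 bytes at wqe + 32, so the
 * push copy moves len + 16 = 56 bytes to also cover the header quadword at
 * offset 24.
 */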
/**
 * i40iw_inline_send - inline send operation
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
                                                struct i40iw_post_sq_info *info,
                                                u32 stag_to_inv,
                                                bool post_sq)
{
        u64 *wqe;
        u8 *dest, *src;
        struct i40iw_post_inline_send *op_info;
        u64 header;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;
        bool read_fence = false;
        u8 wqe_size;
        u64 *push;

        op_info = &info->op.inline_send;
        if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
        if (ret_code)
                return ret_code;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        read_fence |= info->read_fence;
        header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
                 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
                 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
                 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
                 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
                 LS_64(read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        dest = (u8 *)wqe;
        src = (u8 *)(op_info->data);

        if (op_info->len <= 16) {
                memcpy(dest, src, op_info->len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = (u8 *)wqe + 32;
                memcpy(dest, src, op_info->len - 16);
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (qp->push_db) {
                push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
                memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
                i40iw_qp_ring_push_db(qp, wqe_idx);
        } else {
                if (post_sq)
                        i40iw_qp_post_wr(qp);
        }

        return 0;
}
/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
                                                          struct i40iw_post_sq_info *info,
                                                          bool post_sq)
{
        u64 *wqe;
        struct i40iw_inv_local_stag *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
        set_64bit_val(wqe, 16, 0);
        header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_mw_bind - Memory Window bind operation
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
                                            struct i40iw_post_sq_info *info,
                                            bool post_sq)
{
        u64 *wqe;
        struct i40iw_bind_window *op_info;
        u64 header;
        u32 wqe_idx;
        bool local_fence = false;

        op_info = &info->op.bind_window;

        local_fence |= info->local_fence;
        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
                      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
        set_64bit_val(wqe, 16, op_info->bind_length);
        header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
                 LS_64(((op_info->enable_reads << 2) |
                        (op_info->enable_writes << 3)),
                       I40IWQPSQ_STAGRIGHTS) |
                 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
                       I40IWQPSQ_VABASEDTO) |
                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
                 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_post_receive - post receive wqe
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
                                                 struct i40iw_post_rq_info *info)
{
        u64 *wqe;
        u64 header;
        u32 total_size = 0, wqe_idx, i, byte_off;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        for (i = 0; i < info->num_sges; i++)
                total_size += info->sg_list[i].len;
        wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        set_64bit_val(wqe, 16, 0);

        header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
                       I40IWQPSQ_ADDFRAGCNT) |
                 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

        i40iw_set_fragment(wqe, 0, info->sg_list);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
                byte_off += 16;
        }

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);

        return 0;
}
/**
 * i40iw_cq_request_notification - cq notification request (door bell)
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
                                          enum i40iw_completion_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
        arm_next_se |= 1;
        if (cq_notify == IW_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        wmb(); /* make sure WQE is populated before valid bit is set */

        writel(cq->cq_id, cq->cqe_alloc_reg);
}
/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
                                                    u8 count)
{
        I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
        set_64bit_val(cq->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));

        return 0;
}
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @info: cq poll information returned
 * @post_cq: update cq tail
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
                                                       struct i40iw_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
        u64 *cqe, *sw_wqe;
        struct i40iw_qp_uk *qp;
        struct i40iw_ring *pring = NULL;
        u32 wqe_idx, q_type, array_idx = 0;
        enum i40iw_status_code ret_code = 0;
        bool move_cq_head = true;
        u8 polarity;
        u8 addl_wqes = 0;

        if (cq->avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

        if (polarity != cq->polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
        if (info->error) {
                info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
                info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
                info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
        } else {
                info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);

        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
        info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

        qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        if (!qp) {
                ret_code = I40IW_ERR_QUEUE_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
        info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

        if (q_type == I40IW_CQE_QTYPE_RQ) {
                array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
                if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->op_type = I40IW_OP_TYPE_REC;
                if (qword3 & I40IWCQ_STAG_MASK) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
                } else {
                        info->stag_invalid_set = false;
                }
                info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
                I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                pring = &qp->rq_ring;
        } else {
                if (qp->first_sq_wq) {
                        qp->first_sq_wq = false;
                        if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
                                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                                set_64bit_val(cq->shadow_area, 0,
                                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
                                memset(info, 0, sizeof(struct i40iw_cq_poll_info));
                                return i40iw_cq_poll_completion(cq, info);
                        }
                }

                if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

                        info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
                        sw_wqe = qp->sq_base[wqe_idx].elem;
                        get_64bit_val(sw_wqe, 24, &wqe_qword);

                        addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                        I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
                } else {
                        do {
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24, &wqe_qword);
                                op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
                                info->op_type = op_type;
                                addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
                                I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
                                if (op_type != I40IWQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                }
                pring = &qp->sq_ring;
        }

exit:
        if (!ret_code &&
            (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
                if (pring && (I40IW_RING_MORE_WORK(*pring)))
                        move_cq_head = false;

        if (move_cq_head) {
                I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);

                if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
                        cq->polarity ^= 1;

                I40IW_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
        } else {
                qword3 &= ~I40IW_CQ_WQEIDX_MASK;
                qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
 * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
 * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
 * Shift of 2 otherwise (wqe size of 128 bytes).
 */
void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
        *shift = 0;
        if (sge > 1 || inline_data > 16)
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
}
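
/*
 * Examples of the shift rule above: (sge = 1, inline_data = 16) leaves
 * shift = 0 (32-byte WQE); (sge = 3, inline_data = 0) or inline_data = 48
 * gives shift = 1 (64-byte WQE); (sge = 4, inline_data = 0) or
 * inline_data = 49 gives shift = 2 (128-byte WQE).
 */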
/**
 * i40iw_get_sqdepth - get SQ depth (quantas)
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
        *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);

        if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
                *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
        else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
                return I40IW_ERR_INVALID_SIZE;

        return 0;
}
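
/*
 * Illustrative sizing (assuming a small I40IW_SQ_RSVD reserve): sq_size = 100
 * with shift = 1 requests 200 quanta plus the reserve, which rounds up to a
 * 256-deep SQ. Results below I40IW_QP_SW_MIN_WQSIZE << shift are raised to
 * that minimum, and anything above I40IW_QP_SW_MAX_SQ_QUANTAS is rejected
 * with I40IW_ERR_INVALID_SIZE.
 */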
/**
 * i40iw_get_rqdepth - get RQ depth (quantas)
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
        *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);

        if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
                *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
        else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
                return I40IW_ERR_INVALID_SIZE;

        return 0;
}
static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
        .iw_qp_post_wr = i40iw_qp_post_wr,
        .iw_qp_ring_push_db = i40iw_qp_ring_push_db,
        .iw_rdma_write = i40iw_rdma_write,
        .iw_rdma_read = i40iw_rdma_read,
        .iw_send = i40iw_send,
        .iw_inline_rdma_write = i40iw_inline_rdma_write,
        .iw_inline_send = i40iw_inline_send,
        .iw_stag_local_invalidate = i40iw_stag_local_invalidate,
        .iw_mw_bind = i40iw_mw_bind,
        .iw_post_receive = i40iw_post_receive,
        .iw_post_nop = i40iw_nop
};

static const struct i40iw_cq_ops iw_cq_ops = {
        .iw_cq_request_notification = i40iw_cq_request_notification,
        .iw_cq_poll_completion = i40iw_cq_poll_completion,
        .iw_cq_post_entries = i40iw_cq_post_entries,
        .iw_cq_clean = i40iw_clean_cq
};

static const struct i40iw_device_uk_ops iw_device_uk_ops = {
        .iwarp_cq_uk_init = i40iw_cq_uk_init,
        .iwarp_qp_uk_init = i40iw_qp_uk_init,
};
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on number of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array of wqes)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
                                        struct i40iw_qp_uk_init_info *info)
{
        enum i40iw_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;

        if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
                return I40IW_ERR_INVALID_FRAG_COUNT;
        i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);

        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;

        qp->wqe_alloc_reg = info->wqe_alloc_reg;
        qp->qp_id = info->qp_id;

        qp->sq_size = info->sq_size;
        qp->push_db = info->push_db;
        qp->push_wqe = info->push_wqe;

        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;

        I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
        I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        I40IW_RING_MOVE_TAIL(qp->sq_ring);
        I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
        qp->swqe_polarity = 1;
        qp->first_sq_wq = true;
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;

        if (!qp->use_srq) {
                qp->rq_size = info->rq_size;
                qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
                I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
                switch (info->abi_ver) {
                case 4:
                        i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
                        break;
                case 5: /* fallthrough until next ABI version */
                default:
                        rqshift = I40IW_MAX_RQ_WQE_SHIFT;
                        break;
                }
                qp->rq_wqe_size = rqshift;
                qp->rq_wqe_size_multiplier = 4 << rqshift;
        }
        qp->ops = iw_qp_uk_ops;

        return ret_code;
}
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
                                        struct i40iw_cq_uk_init_info *info)
{
        if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
            (info->cq_size > I40IW_MAX_CQ_SIZE))
                return I40IW_ERR_INVALID_SIZE;
        cq->cq_base = (struct i40iw_cqe *)info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_reg = info->cqe_alloc_reg;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;

        I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
        cq->ops = iw_cq_ops;

        return 0;
}
/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
        dev->ops_uk = iw_device_uk_ops;
}
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
        u64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
                else
                        cqe = (u64 *)&cq->cq_base[cq_head];
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == queue)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}
/**
 * i40iw_nop - send a nop
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
                                 u64 wr_id,
                                 bool signaled,
                                 bool post_sq)
{
        u64 header, *wqe;
        u32 wqe_idx;

        wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
                 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
                 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

        wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, header);
        if (post_sq)
                i40iw_qp_post_wr(qp);

        return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
                *wqe_size = 96;
                break;
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        default:
                return I40IW_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
                                                         u8 *wqe_size)
{
        if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
                return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

        if (data_size <= 16)
                *wqe_size = I40IW_QP_WQE_MIN_SIZE;
        else
                *wqe_size = 64;

        return 0;
}
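
/*
 * Worked sizing examples for the inline WQE rule above: 8 bytes of inline
 * data fit in the 16-byte slot of a minimum 32-byte WQE, while 40 bytes need
 * a 64-byte WQE (16 bytes in the first quantum plus up to 32 more starting at
 * offset 32), and anything over I40IW_MAX_INLINE_DATA_SIZE is rejected.
 */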