/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_status.h"
#include "i40iw_user.h"
#include "i40iw_register.h"
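
/*
 * Padding NOP WQEs built by i40iw_nop_1() OR this signature into their header
 * quadword and increment it on every use, so successive padding NOPs do not
 * carry identical headers.
 */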
static u32 nop_signature = 0x55550000;
/**
 * i40iw_nop_1 - insert a nop wqe and move head. no post work
 * @qp: hw qp ptr
 */
static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
{
	u64 header, *wqe;
	u64 *wqe_0 = NULL;
	u32 wqe_idx, peek_head;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return I40IW_ERR_PARAM;

	wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;
	peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
	else
		wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;

	wmb();	/* Memory barrier to ensure data is written before valid bit is set */

	set_64bit_val(wqe, 24, header);
	return 0;
}
/**
 * i40iw_qp_post_wr - post wr to hardware
 * @qp: hw qp ptr
 */
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	mb(); /* valid bit is written and loads completed before reading shadow */

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
	sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
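	/*
	 * Ring the doorbell only when the hardware SQ tail has not already
	 * advanced past the WQEs posted since the last doorbell.  The first
	 * branch covers the case where the software head has not wrapped
	 * since that doorbell; the second covers the wrapped case.
	 */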
	if (sw_sq_head != hw_sq_tail) {
		if (sw_sq_head > qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) &&
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		} else if (sw_sq_head != qp->initial_ring.head) {
			if ((hw_sq_tail >= qp->initial_ring.head) ||
			    (hw_sq_tail < sw_sq_head)) {
				writel(qp->qp_id, qp->wqe_alloc_reg);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}
/**
 * i40iw_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
	qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
}
/**
 * i40iw_qp_get_next_send_wqe - return next wqe ptr
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @wqe_size: size of sq wqe
 */
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
				u32 *wqe_idx,
				u8 wqe_size)
{
	u64 *wqe = NULL;
	u64 *wqe_0 = NULL;
	u64 wqe_ptr;
	u32 peek_head = 0;
	u16 offset;
	enum i40iw_status_code ret_code = 0;
	u8 nop_wqe_cnt = 0, i;

	*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);

	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;
	wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
	offset = (u16)(wqe_ptr) & 0x7F;
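	/*
	 * A WQE may not straddle a 128-byte (I40IW_QP_WQE_MAX_SIZE) boundary.
	 * If this one would, pad the remainder of the current 128-byte chunk
	 * with NOP WQEs and start the real WQE at the next aligned slot.
	 */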
	if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
		nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
		for (i = 0; i < nop_wqe_cnt; i++) {
			i40iw_nop_1(qp);
			I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
			if (ret_code)
				return NULL;
		}

		*wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
		if (!*wqe_idx)
			qp->swqe_polarity = !qp->swqe_polarity;
	}
	for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
		if (ret_code)
			return NULL;
	}

	wqe = qp->sq_base[*wqe_idx].elem;

	peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
	wqe_0 = qp->sq_base[peek_head].elem;
	if (peek_head & 0x3)
		wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);

	return wqe;
}
/**
 * i40iw_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 */
static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
{
	if (sge) {
		set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
		set_64bit_val(wqe, (offset + 8),
			      (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
			       LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
	}
}
/**
 * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
	u64 *wqe = NULL;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is number of qwords in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;

	return wqe;
}
/**
 * i40iw_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
					       struct i40iw_post_sq_info *info,
					       bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
		return I40IW_ERR_QP_INVALID_MSG_SIZE;

	read_fence |= info->read_fence;

	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	if (!op_info->rem_addr.stag)
		return I40IW_ERR_BAD_STAG;

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
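	/*
	 * Fragment layout: the first SGE occupies bytes 0-15 of the WQE,
	 * additional SGEs follow 16 bytes apart starting at byte offset 32;
	 * bytes 16-23 hold the remote TO and bytes 24-31 the header quadword.
	 */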
	for (i = 1; i < op_info->num_lo_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
					      struct i40iw_post_sq_info *info,
					      bool inv_stag,
					      bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_rdma_read *op_info;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;
	u8 wqe_size;
	bool local_fence = false;

	op_info = &info->op.rdma_read;
	ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len;
	local_fence |= info->local_fence;

	set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, &op_info->lo_addr);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: stag_to_inv value
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
					 struct i40iw_post_sq_info *info,
					 u32 stag_to_inv,
					 bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_post_send *op_info;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;
	ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
	set_64bit_val(wqe, 16, 0);
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, op_info->sg_list);

	for (i = 1; i < op_info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
						      struct i40iw_post_sq_info *info,
						      bool post_sq)
{
	u64 header = 0;
	u64 *wqe;
	u64 *push;
	struct i40iw_inline_rdma_write *op_info;
	u32 i, wqe_idx;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;
	u8 *dest, *src;

	op_info = &info->op.inline_rdma_write;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;
	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
	set_64bit_val(wqe, 16,
		      LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));

	header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
		 LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
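	/*
	 * Copy the inline payload into the WQE itself: the first 16 bytes go
	 * into bytes 0-15, and anything beyond that skips bytes 16-31 (the
	 * remote address and header quadwords) and continues at byte offset 32.
	 */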
	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
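	/*
	 * Push mode: besides writing the WQE to the SQ, copy it into the
	 * mapped push page and ring the push doorbell so the device can
	 * consume the WQE directly from the push page.
	 */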
	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}
/**
 * i40iw_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @stag_to_inv: remote stag
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
						struct i40iw_post_sq_info *info,
						u32 stag_to_inv,
						bool post_sq)
{
	u64 header;
	u64 *wqe;
	u64 *push;
	struct i40iw_post_inline_send *op_info;
	u32 wqe_idx, i;
	enum i40iw_status_code ret_code;
	bool read_fence = false;
	u8 wqe_size;
	u8 *dest, *src;

	op_info = &info->op.inline_send;
	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
	if (ret_code)
		return ret_code;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	read_fence |= info->read_fence;

	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
	header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
		 LS_64(info->op_type, I40IWQPSQ_OPCODE) |
		 LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
		 LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
		 LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
		 LS_64(read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	/* inline data is copied the same way as in i40iw_inline_rdma_write */
	dest = (u8 *)wqe;
	src = (u8 *)(op_info->data);

	if (op_info->len <= 16) {
		for (i = 0; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	} else {
		for (i = 0; i < 16; i++, src++, dest++)
			*dest = *src;
		dest = (u8 *)wqe + 32;
		for (; i < op_info->len; i++, src++, dest++)
			*dest = *src;
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (qp->push_db) {
		push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
		memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
		i40iw_qp_ring_push_db(qp, wqe_idx);
	} else {
		if (post_sq)
			i40iw_qp_post_wr(qp);
	}

	return 0;
}
/**
 * i40iw_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
							  struct i40iw_post_sq_info *info,
							  bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_inv_local_stag *op_info;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
	set_64bit_val(wqe, 16, 0);
	header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_mw_bind - Memory Window bind operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
					    struct i40iw_post_sq_info *info,
					    bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_bind_window *op_info;
	u32 wqe_idx;
	bool local_fence = false;

	op_info = &info->op.bind_window;

	local_fence |= info->local_fence;
	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
		      LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
	set_64bit_val(wqe, 16, op_info->bind_length);
	header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
		 LS_64(((op_info->enable_reads << 2) |
			(op_info->enable_writes << 3)),
		       I40IWQPSQ_STAGRIGHTS) |
		 LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
		       I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
						 struct i40iw_post_rq_info *info)
{
	u64 header;
	u64 *wqe;
	u32 total_size = 0, wqe_idx, i, byte_off;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	for (i = 0; i < info->num_sges; i++)
		total_size += info->sg_list[i].len;
	wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	set_64bit_val(wqe, 16, 0);

	header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
		       I40IWQPSQ_ADDFRAGCNT) |
		 LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);

	i40iw_set_fragment(wqe, 0, info->sg_list);

	for (i = 1; i < info->num_sges; i++) {
		byte_off = 32 + (i - 1) * 16;
		i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
	}

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);

	return 0;
}
/**
 * i40iw_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
					  enum i40iw_completion_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
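	/*
	 * Re-arm the CQ: always arm for solicited events, additionally arm
	 * for the next completion when IW_CQ_COMPL_EVENT is requested, then
	 * write the updated arm fields back to the doorbell shadow area.
	 */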
	arm_next_se |= 1;
	if (cq_notify == IW_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	wmb(); /* make sure WQE is populated before valid bit is set */

	writel(cq->cq_id, cq->cqe_alloc_reg);
}
/**
 * i40iw_cq_post_entries - update tail in shadow memory
 * @cq: hw cq
 * @count: # of entries processed
 */
static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
						    u8 count)
{
	I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
	set_64bit_val(cq->shadow_area, 0,
		      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	return 0;
}
/**
 * i40iw_cq_poll_completion - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 * @post_cq: update cq tail
 */
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
						       struct i40iw_cq_poll_info *info,
						       bool post_cq)
{
	u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
	u64 *cqe, *sw_wqe;
	struct i40iw_qp_uk *qp;
	struct i40iw_ring *pring = NULL;
	u32 wqe_idx, q_type, array_idx = 0;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_status_code ret_code2 = 0;
	bool move_cq_head = true;
	u8 polarity;
	u8 addl_frag_cnt, addl_wqes = 0;

	if (cq->avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

	if (polarity != cq->polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
	info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
	info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
	if (info->error) {
		info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
		info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
		info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
	} else {
		info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);

	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
	info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);

	qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
	wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
	info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;

	if (q_type == I40IW_CQE_QTYPE_RQ) {
		array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
		if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->op_type = I40IW_OP_TYPE_REC;
		if (qword3 & I40IWCQ_STAG_MASK) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
		} else {
			info->stag_invalid_set = false;
		}
		info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
		I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		pring = &qp->rq_ring;
	} else {
		if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;

			info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
			sw_wqe = qp->sq_base[wqe_idx].elem;
			get_64bit_val(sw_wqe, 24, &wqe_qword);
			addl_frag_cnt =
			    (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
			i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);

			addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
			I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
		} else {
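			/*
			 * Flushed SQ completion: the wqe_idx reported in the
			 * CQE may not correspond to a posted WQE, so walk the
			 * SQ from its current tail, skipping padding NOPs,
			 * until a real WQE is found to report.
			 */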
			do {
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24, &wqe_qword);
				op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
				info->op_type = op_type;
				addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
				i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
				addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
				I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
				if (op_type != I40IWQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
		}
		pring = &qp->sq_ring;
	}

	if (!ret_code &&
	    (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
		if (pring && (I40IW_RING_MORE_WORK(*pring)))
			move_cq_head = false;

	if (move_cq_head) {
		I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);

		if (ret_code2 && !ret_code)
			ret_code = ret_code2;

		if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
			cq->polarity ^= 1;

		I40IW_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~I40IW_CQ_WQEIDX_MASK;
		qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}
/**
 * i40iw_get_wqe_shift - get shift count for maximum wqe size
 * @wqdepth: depth of wq required.
 * @sge: Maximum Scatter Gather Elements per wqe
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on sge.
 * If sge == 1, shift = 0 (wqe size of 32 bytes); for sge = 2 and 3, shift = 1
 * (64 byte wqes) and 2 otherwise (128 byte wqe).
 */
enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift)
{
	u32 size;

	*shift = 0;
	if (sge > 1)
		*shift = (sge < 4) ? 1 : 2;

	/* check that wqdepth is a power of 2 and within range */
	if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
		return I40IW_ERR_INVALID_SIZE;

	size = wqdepth << *shift;	/* multiple of 32 bytes count */
	if (size > I40IWQP_SW_MAX_WQSIZE)
		return I40IW_ERR_INVALID_SIZE;

	return 0;
}
static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
	i40iw_qp_post_wr,
	i40iw_qp_ring_push_db,
	i40iw_rdma_write,
	i40iw_rdma_read,
	i40iw_send,
	i40iw_inline_rdma_write,
	i40iw_inline_send,
	i40iw_stag_local_invalidate,
	i40iw_mw_bind,
	i40iw_post_receive,
	i40iw_nop
};

static struct i40iw_cq_ops iw_cq_ops = {
	i40iw_cq_request_notification,
	i40iw_cq_poll_completion,
	i40iw_cq_post_entries,
	i40iw_clean_cq
};

static struct i40iw_device_uk_ops iw_device_uk_ops = {
	i40iw_cq_uk_init,
	i40iw_qp_uk_init,
};
/**
 * i40iw_qp_uk_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * initializes the vars used in both user and kernel mode.
 * size of the wqe depends on number of max. fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq. If srq is used,
 * then rq_base will point to one rq wqe only (not the whole
 * array)
 */
enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
					struct i40iw_qp_uk_init_info *info)
{
	enum i40iw_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;

	if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
		return I40IW_ERR_INVALID_FRAG_COUNT;
	ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift);
	if (ret_code)
		return ret_code;

	ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift);
	if (ret_code)
		return ret_code;

	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;
	qp->rq_wrid_array = info->rq_wrid_array;

	qp->wqe_alloc_reg = info->wqe_alloc_reg;
	qp->qp_id = info->qp_id;

	qp->sq_size = info->sq_size;
	qp->push_db = info->push_db;
	qp->push_wqe = info->push_wqe;

	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
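	/*
	 * Ring sizes are kept in 32-byte WQE quanta: shifting sq_size by the
	 * WQE-size shift gives the number of minimum-size slots the ring
	 * actually spans.
	 */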
	I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
	I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
	I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
	I40IW_RING_MOVE_TAIL(qp->sq_ring);
	I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
	qp->swqe_polarity = 1;
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;

	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->rq_wqe_size = rqshift;
	I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 4 << rqshift;
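	/*
	 * rq_wqe_size_multiplier is the number of quadwords in one RQ WQE
	 * (4 quadwords, i.e. 32 bytes, per shift step); see
	 * i40iw_qp_get_next_recv_wqe() for how it is used to index rq_base.
	 */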
	qp->ops = iw_qp_uk_ops;

	return ret_code;
}
/**
 * i40iw_cq_uk_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
					struct i40iw_cq_uk_init_info *info)
{
	if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
	    (info->cq_size > I40IW_MAX_CQ_SIZE))
		return I40IW_ERR_INVALID_SIZE;
	cq->cq_base = (struct i40iw_cqe *)info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_reg = info->cqe_alloc_reg;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;

	cq->polarity = 1;
	I40IW_RING_INIT(cq->cq_ring, cq->cq_size);

	cq->ops = iw_cq_ops;

	return 0;
}
/**
 * i40iw_device_init_uk - setup routines for iwarp shared device
 * @dev: iwarp shared (user and kernel)
 */
void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
{
	dev->ops_uk = iw_device_uk_ops;
}
/**
 * i40iw_clean_cq - clean cq entries
 * @queue: completion context
 * @cq: cq to clean
 */
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
{
	u64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
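	/*
	 * Walk the currently valid CQEs from the ring head and zero the
	 * completion context of any entry that belongs to the given queue;
	 * the context is what i40iw_cq_poll_completion() uses to locate the QP.
	 */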
	do {
		if (cq->avoid_mem_cflct)
			cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
		else
			cqe = (u64 *)&cq->cq_base[cq_head];
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == queue)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}
/**
 * i40iw_nop - send a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: flag if signaled for completion
 * @post_sq: flag to post sq
 */
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
				 u64 wr_id,
				 bool signaled,
				 bool post_sq)
{
	u64 header, *wqe;
	u32 wqe_idx;

	wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
	qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
		 LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);

	wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, header);
	if (post_sq)
		i40iw_qp_post_wr(qp);

	return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
		*wqe_size = 96;
		break;
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}
/**
 * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size of rq wqe returned
 */
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	default:
		return I40IW_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}
/**
 * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size
 * @data_size: data size for inline
 * @wqe_size: size of sq wqe returned
 */
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
							 u8 *wqe_size)
{
	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
		return I40IW_ERR_INVALID_IMM_DATA_SIZE;

	if (data_size <= 16)
		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
	else if (data_size <= 48)
		*wqe_size = 64;
	else if (data_size <= 80)
		*wqe_size = 96;
	else
		*wqe_size = 128;

	return 0;
}
<= 80)