/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_puda.h"

static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
                              struct i40iw_puda_buf *buf);
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
                                                      *rsrc, bool initial);

/**
 * i40iw_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */
static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
{
        struct i40iw_puda_buf *buf = NULL;

        if (!list_empty(list)) {
                buf = (struct i40iw_puda_buf *)list->next;
                list_del((struct list_head *)&buf->list);
        }
        return buf;
}

/**
 * i40iw_puda_get_bufpool - return buffer from resource
 * @rsrc: resource to use for buffer
 */
struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_puda_buf *buf = NULL;
        struct list_head *list = &rsrc->bufpool;
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        buf = i40iw_puda_get_listbuf(list);
        if (buf)
                rsrc->avail_buf_count--;
        else
                rsrc->stats_buf_alloc_fail++;
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        return buf;
}

/**
 * i40iw_puda_ret_bufpool - return buffer to rsrc list
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to resource
 */
void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
                            struct i40iw_puda_buf *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        list_add(&buf->list, &rsrc->bufpool);
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
        rsrc->avail_buf_count++;
}

/**
 * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */
static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
                                    struct i40iw_puda_buf *buf, bool initial)
{
        u64 *wqe;
        struct i40iw_sc_qp *qp = &rsrc->qp;
        u64 offset24 = 0;

        qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
                    "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
                    wqe_idx, buf, wqe);
        if (!initial)
                get_64bit_val(wqe, 24, &offset24);

        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);

        set_64bit_val(wqe, 0, buf->mem.pa);
        set_64bit_val(wqe, 8,
                      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
        i40iw_insert_wqe_hdr(wqe, offset24);
}

/**
 * i40iw_puda_replenish_rq - post rcv buffers
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
                                                      bool initial)
{
        u32 i;
        u32 invalid_cnt = rsrc->rxq_invalid_cnt;
        struct i40iw_puda_buf *buf = NULL;

        for (i = 0; i < invalid_cnt; i++) {
                buf = i40iw_puda_get_bufpool(rsrc);
                if (!buf)
                        return I40IW_ERR_list_empty;
                i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
                                        initial);
                rsrc->rx_wqe_idx =
                    ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
                rsrc->rxq_invalid_cnt--;
        }
        return 0;
}

/**
 * i40iw_puda_alloc_buf - allocate mem for buffer
 * @dev: iwarp device
 * @length: length of buffer
 */
static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
                                                   u32 length)
{
        struct i40iw_puda_buf *buf = NULL;
        struct i40iw_virt_mem buf_mem;
        enum i40iw_status_code ret;

        ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
                                      sizeof(struct i40iw_puda_buf));
        if (ret) {
                i40iw_debug(dev, I40IW_DEBUG_PUDA,
                            "%s: error mem for buf\n", __func__);
                return NULL;
        }
        buf = (struct i40iw_puda_buf *)buf_mem.va;
        ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
        if (ret) {
                i40iw_debug(dev, I40IW_DEBUG_PUDA,
                            "%s: error dma mem for buf\n", __func__);
                i40iw_free_virt_mem(dev->hw, &buf_mem);
                return NULL;
        }
        buf->buf_mem.va = buf_mem.va;
        buf->buf_mem.size = buf_mem.size;
        return buf;
}

/**
 * i40iw_puda_dele_buf - delete buffer back to system
 * @dev: iwarp device
 * @buf: buffer to free
 */
static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
                                struct i40iw_puda_buf *buf)
{
        i40iw_free_dma_mem(dev->hw, &buf->mem);
        i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
}

/**
 * i40iw_puda_get_next_send_wqe - return next wqe for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */
static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code = 0;

        *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;
        I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
        if (ret_code)
                return wqe;
        wqe = qp->sq_base[*wqe_idx].elem;

        return wqe;
}

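/*
 * Note on the polarity scheme used above: the puda SQ marks WQE ownership
 * with a valid (polarity) bit rather than a separate producer count.
 * swqe_polarity is flipped each time the ring head wraps back to index 0,
 * so a WQE whose valid bit matches the current polarity is seen by the
 * hardware as newly posted.
 */
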
/**
 * i40iw_puda_poll_info - poll cq for completion
 * @cq: cq for poll
 * @info: info return for successful completion
 */
static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
                                                   struct i40iw_puda_completion_info *info)
{
        u64 qword0, qword2, qword3;
        u64 *cqe;
        u64 comp_ctx;
        bool valid_bit;
        u32 major_err, minor_err;
        bool error;

        cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
        get_64bit_val(cqe, 24, &qword3);
        valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);

        if (valid_bit != cq->cq_uk.polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
        error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
        if (error) {
                i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
                major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
                minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
                info->compl_error = major_err << 16 | minor_err;
                return I40IW_ERR_CQ_COMPL_ERROR;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
        info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);

        get_64bit_val(cqe, 8, &comp_ctx);
        info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
        info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);

        if (info->q_type == I40IW_CQE_QTYPE_RQ) {
                info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
                info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
                info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
                info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
        }

        return 0;
}

/**
 * i40iw_puda_poll_completion - processes completion for cq
 * @dev: iwarp device
 * @cq: cq getting interrupt
 * @compl_err: return any completion err
 */
enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
                                                  struct i40iw_sc_cq *cq, u32 *compl_err)
{
        struct i40iw_qp_uk *qp;
        struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
        struct i40iw_puda_completion_info info;
        enum i40iw_status_code ret = 0;
        struct i40iw_puda_buf *buf;
        struct i40iw_puda_rsrc *rsrc;
        void *sqwrid;
        u8 cq_type = cq->cq_type;
        unsigned long flags;

        if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
                rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
        } else {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
                return I40IW_ERR_BAD_PTR;
        }
        memset(&info, 0, sizeof(info));
        ret = i40iw_puda_poll_info(cq, &info);
        *compl_err = info.compl_error;
        if (ret == I40IW_ERR_QUEUE_EMPTY)
                return ret;
        if (ret)
                goto done;

        qp = info.qp;
        if (!qp || !rsrc) {
                ret = I40IW_ERR_BAD_PTR;
                goto done;
        }

        if (qp->qp_id != rsrc->qp_id) {
                ret = I40IW_ERR_BAD_PTR;
                goto done;
        }

        if (info.q_type == I40IW_CQE_QTYPE_RQ) {
                buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
                /* Get all the tcpip information in the buf header */
                ret = i40iw_puda_get_tcpip_info(&info, buf);
                if (ret) {
                        rsrc->stats_rcvd_pkt_err++;
                        if (cq_type == I40IW_CQ_TYPE_ILQ) {
                                i40iw_ilq_putback_rcvbuf(&rsrc->qp,
                                                         info.wqe_idx);
                        } else {
                                i40iw_puda_ret_bufpool(rsrc, buf);
                                i40iw_puda_replenish_rq(rsrc, false);
                        }
                        goto done;
                }

                rsrc->stats_pkt_rcvd++;
                rsrc->compl_rxwqe_idx = info.wqe_idx;
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
                rsrc->receive(rsrc->vsi, buf);
                if (cq_type == I40IW_CQ_TYPE_ILQ)
                        i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
                else
                        i40iw_puda_replenish_rq(rsrc, false);
        } else {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
                sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
                I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
                rsrc->xmit_complete(rsrc->vsi, sqwrid);
                spin_lock_irqsave(&rsrc->bufpool_lock, flags);
                rsrc->tx_wqe_avail_cnt++;
                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
                if (!list_empty(&rsrc->txpend))
                        i40iw_puda_send_buf(rsrc, NULL);
        }

done:
        I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
        if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
                cq_uk->polarity = !cq_uk->polarity;
        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
        set_64bit_val(cq_uk->shadow_area, 0,
                      I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
        return 0;
}

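/*
 * After each polled CQE the CQ ring head advances and, when it wraps to
 * index 0, the CQ polarity is toggled to match the valid bits hardware
 * will write on its next pass over the ring. Writing the current head
 * into the shadow area is what reports consumed CQEs back to hardware.
 */
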
/**
 * i40iw_puda_send - complete send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */
enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
                                       struct i40iw_puda_send_info *info)
{
        u64 *wqe;
        u32 iplen, l4len;
        u64 header[2];
        u32 wqe_idx;
        u8 iipt;

        /* number of 32 bits DWORDS in header */
        l4len = info->tcplen >> 2;
        if (info->ipv4) {
                iipt = 3;
                iplen = 5;
        } else {
                iipt = 1;
                iplen = 10;
        }

        wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
        if (!wqe)
                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
        qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
        /* Third line of WQE descriptor */
        /* maclen is in words */
        header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
                    LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
                    LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
                    LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
        /* Fourth line of WQE descriptor */
        header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
                    LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
                    LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
                    LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);

        set_64bit_val(wqe, 0, info->paddr);
        set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
        set_64bit_val(wqe, 16, header[0]);

        i40iw_insert_wqe_hdr(wqe, header[1]);

        i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
        i40iw_qp_post_wr(&qp->qp_uk);
        return 0;
}

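/*
 * Field units in the send WQE above: maclen is carried in bytes in the
 * buffer and converted to 16-bit words (>> 1) for I40IW_UDA_QPSQ_MACLEN,
 * while l4len is the TCP header length in 32-bit words (tcplen >> 2).
 * The iipt/iplen pair encodes the IP version and header length in 32-bit
 * words (5 for a bare IPv4 header, 10 for IPv6), assuming no IP options.
 */
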
/**
 * i40iw_puda_send_buf - transmit puda buffer
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit
 */
void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
{
        struct i40iw_puda_send_info info;
        enum i40iw_status_code ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&rsrc->bufpool_lock, flags);
        /* if no wqe available or not from a completion and we have
         * pending buffers, we must queue new buffer
         */
        if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
                list_add_tail(&buf->list, &rsrc->txpend);
                spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
                rsrc->stats_sent_pkt_q++;
                if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
                        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
                                    "%s: adding to txpend\n", __func__);
                return;
        }
        rsrc->tx_wqe_avail_cnt--;
        /* if we are coming from a completion and have pending buffers
         * then get one from pending list
         */
        if (!buf) {
                buf = i40iw_puda_get_listbuf(&rsrc->txpend);
                if (!buf)
                        goto done;
        }

        info.scratch = (void *)buf;
        info.paddr = buf->mem.pa;
        info.len = buf->totallen;
        info.tcplen = buf->tcphlen;
        info.maclen = buf->maclen;
        info.ipv4 = buf->ipv4;
        info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);

        ret = i40iw_puda_send(&rsrc->qp, &info);
        if (ret) {
                rsrc->tx_wqe_avail_cnt++;
                rsrc->stats_sent_pkt_q++;
                list_add(&buf->list, &rsrc->txpend);
                if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
                        i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
                                    "%s: adding to puda_send\n", __func__);
        } else {
                rsrc->stats_pkt_sent++;
        }
done:
        spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
}

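/*
 * Transmit ordering: a new buffer is queued to txpend whenever no SQ WQE
 * is free or older buffers are already pending, and the completion path
 * calls this routine with buf == NULL to drain txpend. This keeps
 * buffers going out on the wire in the order they were handed in.
 */
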
/**
 * i40iw_puda_qp_setctx - during init, set qp's context
 * @rsrc: qp's resource
 */
static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_sc_qp *qp = &rsrc->qp;
        u64 *qp_ctx = qp->hw_host_ctx;

        set_64bit_val(qp_ctx, 8, qp->sq_pa);
        set_64bit_val(qp_ctx, 16, qp->rq_pa);

        set_64bit_val(qp_ctx, 24,
                      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
                      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));

        set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
        set_64bit_val(qp_ctx, 56, 0);
        set_64bit_val(qp_ctx, 64, 1);

        set_64bit_val(qp_ctx, 136,
                      LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
                      LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));

        set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));

        set_64bit_val(qp_ctx, 168,
                      LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));

        set_64bit_val(qp_ctx, 176,
                      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
                      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
                      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));

        i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
                        qp_ctx, I40IW_QP_CTX_SIZE);
}

/**
 * i40iw_puda_qp_wqe - setup wqe for qp create
 * @dev: iwarp device
 * @qp: resource qp
 */
static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;
        struct i40iw_ccq_cqe_info compl_info;
        enum i40iw_status_code status = 0;

        cqp = dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
        set_64bit_val(wqe, 40, qp->shadow_area_pa);
        header = qp->qp_uk.qp_id |
                 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
                 LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
                 LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
                 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
        i40iw_sc_cqp_post_sq(cqp);
        status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                                    I40IW_CQP_OP_CREATE_QP,
                                                    &compl_info);
        return status;
}

/**
 * i40iw_puda_qp_create - create qp for resource
 * @rsrc: resource to use for buffer
 */
static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_sc_qp *qp = &rsrc->qp;
        struct i40iw_qp_uk *ukqp = &qp->qp_uk;
        enum i40iw_status_code ret = 0;
        u32 sq_size, rq_size, t_size;
        struct i40iw_dma_mem *mem;

        sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
        rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
        t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
                  I40IW_QP_CTX_SIZE);
        /* Get page aligned memory */
        ret = i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
                                     I40IW_HW_PAGE_SIZE);
        if (ret) {
                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
                return ret;
        }

        mem = &rsrc->qpmem;
        memset(mem->va, 0, t_size);
        qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
        qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
        qp->pd = &rsrc->sc_pd;
        qp->qp_type = I40IW_QP_TYPE_UDA;
        qp->dev = rsrc->dev;
        qp->back_qp = (void *)rsrc;
        qp->sq_pa = mem->pa;
        qp->rq_pa = qp->sq_pa + sq_size;
        qp->vsi = rsrc->vsi;
        ukqp->sq_base = mem->va;
        ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
        ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
        qp->shadow_area_pa = qp->rq_pa + rq_size;
        qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
        qp->hw_host_ctx_pa =
                qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
        ukqp->qp_id = rsrc->qp_id;
        ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
        ukqp->rq_wrid_array = rsrc->rq_wrid_array;

        ukqp->sq_size = rsrc->sq_size;
        ukqp->rq_size = rsrc->rq_size;

        I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
        I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
        I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);

        if (qp->pd->dev->is_pf)
                ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
                                                      I40E_PFPE_WQEALLOC);
        else
                ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
                                                      I40E_VFPE_WQEALLOC1);

        qp->user_pri = 0;
        i40iw_qp_add_qos(qp);
        i40iw_puda_qp_setctx(rsrc);
        if (rsrc->dev->ceq_valid)
                ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
        else
                ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
        if (ret) {
                i40iw_qp_rem_qos(qp);
                i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
        }
        return ret;
}

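/*
 * The single qpmem DMA allocation above is carved up in order as
 * [SQ WQEs][RQ WQEs][shadow area][QP hardware context]; the physical
 * addresses sq_pa/rq_pa/shadow_area_pa/hw_host_ctx_pa are simple offsets
 * into it, which is why t_size adds I40IW_QP_CTX_SIZE at the end.
 */
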
/**
 * i40iw_puda_cq_wqe - setup wqe for cq create
 * @dev: iwarp device
 * @cq: resource cq
 */
static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;
        struct i40iw_ccq_cqe_info compl_info;
        enum i40iw_status_code status = 0;

        cqp = dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
        set_64bit_val(wqe, 16,
                      LS_64(cq->shadow_read_threshold,
                            I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
        set_64bit_val(wqe, 32, cq->cq_pa);

        set_64bit_val(wqe, 40, cq->shadow_area_pa);

        header = cq->cq_uk.cq_id |
            LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
            LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
            LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
            LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
            LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_sc_cqp_post_sq(dev->cqp);
        status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                                    I40IW_CQP_OP_CREATE_CQ,
                                                    &compl_info);
        return status;
}

/**
 * i40iw_puda_cq_create - create cq for resource
 * @rsrc: resource for which cq to create
 */
static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
{
        struct i40iw_sc_dev *dev = rsrc->dev;
        struct i40iw_sc_cq *cq = &rsrc->cq;
        enum i40iw_status_code ret = 0;
        u32 tsize, cqsize;
        struct i40iw_dma_mem *mem;
        struct i40iw_cq_init_info info;
        struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;

        cq->vsi = rsrc->vsi;
        cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
        tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
        ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
                                     I40IW_CQ0_ALIGNMENT);
        if (ret)
                return ret;

        mem = &rsrc->cqmem;
        memset(&info, 0, sizeof(info));
        info.dev = dev;
        info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
                         I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
        info.shadow_read_threshold = rsrc->cq_size >> 2;
        info.ceq_id_valid = true;
        info.cq_base_pa = mem->pa;
        info.shadow_area_pa = mem->pa + cqsize;
        init_info->cq_base = mem->va;
        init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
        init_info->cq_size = rsrc->cq_size;
        init_info->cq_id = rsrc->cq_id;
        info.ceqe_mask = true;
        ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
        if (ret)
                goto error;
        if (rsrc->dev->ceq_valid)
                ret = i40iw_cqp_cq_create_cmd(dev, cq);
        else
                ret = i40iw_puda_cq_wqe(dev, cq);
error:
        if (ret)
                i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
        return ret;
}

/**
 * i40iw_puda_free_qp - free qp for resource
 * @rsrc: resource for which qp to free
 */
static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
{
        enum i40iw_status_code ret;
        struct i40iw_ccq_cqe_info compl_info;
        struct i40iw_sc_dev *dev = rsrc->dev;

        if (rsrc->dev->ceq_valid) {
                i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
                return;
        }

        ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
                        0, false, true, true);
        if (ret)
                i40iw_debug(dev, I40IW_DEBUG_PUDA,
                            "%s error puda qp destroy wqe\n",
                            __func__);

        if (!ret) {
                ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                I40IW_CQP_OP_DESTROY_QP,
                                &compl_info);
                if (ret)
                        i40iw_debug(dev, I40IW_DEBUG_PUDA,
                                    "%s error puda qp destroy failed\n",
                                    __func__);
        }
}

/**
 * i40iw_puda_free_cq - free cq for resource
 * @rsrc: resource for which cq to free
 */
static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
{
        enum i40iw_status_code ret;
        struct i40iw_ccq_cqe_info compl_info;
        struct i40iw_sc_dev *dev = rsrc->dev;

        if (rsrc->dev->ceq_valid) {
                i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
                return;
        }
        ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);

        if (ret)
                i40iw_debug(dev, I40IW_DEBUG_PUDA,
                            "%s error ieq cq destroy\n",
                            __func__);

        if (!ret) {
                ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
                                I40IW_CQP_OP_DESTROY_CQ,
                                &compl_info);
                if (ret)
                        i40iw_debug(dev, I40IW_DEBUG_PUDA,
                                    "%s error ieq qp destroy done\n",
                                    __func__);
        }
}

/**
 * i40iw_puda_dele_resources - delete all resources during close
 * @vsi: pointer to vsi structure
 * @type: type of resource to dele
 * @reset: true if reset chip
 */
void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
                               enum puda_resource_type type,
                               bool reset)
{
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_puda_rsrc *rsrc;
        struct i40iw_puda_buf *buf = NULL;
        struct i40iw_puda_buf *nextbuf = NULL;
        struct i40iw_virt_mem *vmem;

        switch (type) {
        case I40IW_PUDA_RSRC_TYPE_ILQ:
                rsrc = vsi->ilq;
                vmem = &vsi->ilq_mem;
                break;
        case I40IW_PUDA_RSRC_TYPE_IEQ:
                rsrc = vsi->ieq;
                vmem = &vsi->ieq_mem;
                break;
        default:
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
                            __func__, type);
                return;
        }

        switch (rsrc->completion) {
        case PUDA_HASH_CRC_COMPLETE:
                i40iw_free_hash_desc(rsrc->hash_desc);
                /* fall through */
        case PUDA_QP_CREATED:
                if (!reset)
                        i40iw_puda_free_qp(rsrc);

                i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
                /* fall through */
        case PUDA_CQ_CREATED:
                if (!reset)
                        i40iw_puda_free_cq(rsrc);

                i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
                break;
        default:
                i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
                break;
        }
        /* Free all allocated puda buffers for both tx and rx */
        buf = rsrc->alloclist;
        while (buf) {
                nextbuf = buf->next;
                i40iw_puda_dele_buf(dev, buf);
                buf = nextbuf;
                rsrc->alloc_buf_count--;
        }
        i40iw_free_virt_mem(dev->hw, vmem);
}

/**
 * i40iw_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */
static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
                                                   u32 count)
{
        u32 i;
        struct i40iw_puda_buf *buf;
        struct i40iw_puda_buf *nextbuf;

        for (i = 0; i < count; i++) {
                buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
                if (!buf) {
                        rsrc->stats_buf_alloc_fail++;
                        return I40IW_ERR_NO_MEMORY;
                }
                i40iw_puda_ret_bufpool(rsrc, buf);
                rsrc->alloc_buf_count++;
                if (!rsrc->alloclist) {
                        rsrc->alloclist = buf;
                } else {
                        nextbuf = rsrc->alloclist;
                        rsrc->alloclist = buf;
                        buf->next = nextbuf;
                }
        }
        rsrc->avail_buf_count = rsrc->alloc_buf_count;
        return 0;
}

/**
 * i40iw_puda_create_rsrc - create resource (ilq or ieq)
 * @vsi: pointer to vsi structure
 * @info: resource information
 */
enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
                                              struct i40iw_puda_rsrc_info *info)
{
        struct i40iw_sc_dev *dev = vsi->dev;
        enum i40iw_status_code ret = 0;
        struct i40iw_puda_rsrc *rsrc;
        u32 pudasize;
        u32 sqwridsize, rqwridsize;
        struct i40iw_virt_mem *vmem;

        info->count = 1;
        pudasize = sizeof(struct i40iw_puda_rsrc);
        sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
        rqwridsize = info->rq_size * 8;
        switch (info->type) {
        case I40IW_PUDA_RSRC_TYPE_ILQ:
                vmem = &vsi->ilq_mem;
                break;
        case I40IW_PUDA_RSRC_TYPE_IEQ:
                vmem = &vsi->ieq_mem;
                break;
        default:
                return I40IW_NOT_SUPPORTED;
        }
        ret =
            i40iw_allocate_virt_mem(dev->hw, vmem,
                                    pudasize + sqwridsize + rqwridsize);
        if (ret)
                return ret;
        rsrc = (struct i40iw_puda_rsrc *)vmem->va;
        spin_lock_init(&rsrc->bufpool_lock);
        if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
                vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
                vsi->ilq_count = info->count;
                rsrc->receive = info->receive;
                rsrc->xmit_complete = info->xmit_complete;
        } else {
                vmem = &vsi->ieq_mem;
                vsi->ieq_count = info->count;
                vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
                rsrc->receive = i40iw_ieq_receive;
                rsrc->xmit_complete = i40iw_ieq_tx_compl;
        }

        rsrc->type = info->type;
        rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
        rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
        /* Initialize all ieq lists */
        INIT_LIST_HEAD(&rsrc->bufpool);
        INIT_LIST_HEAD(&rsrc->txpend);

        rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
        dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
        rsrc->qp_id = info->qp_id;
        rsrc->cq_id = info->cq_id;
        rsrc->sq_size = info->sq_size;
        rsrc->rq_size = info->rq_size;
        rsrc->cq_size = info->rq_size + info->sq_size;
        rsrc->buf_size = info->buf_size;
        rsrc->dev = dev;
        rsrc->vsi = vsi;

        ret = i40iw_puda_cq_create(rsrc);
        if (!ret) {
                rsrc->completion = PUDA_CQ_CREATED;
                ret = i40iw_puda_qp_create(rsrc);
        }
        if (ret) {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n",
                            __func__);
                goto error;
        }
        rsrc->completion = PUDA_QP_CREATED;

        ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
        if (ret) {
                i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n",
                            __func__);
                goto error;
        }

        rsrc->rxq_invalid_cnt = info->rq_size;
        ret = i40iw_puda_replenish_rq(rsrc, true);
        if (ret)
                goto error;

        if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
                if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
                        rsrc->check_crc = true;
                        rsrc->completion = PUDA_HASH_CRC_COMPLETE;
                }
        }

        dev->ccq_ops->ccq_arm(&rsrc->cq);
        return ret;

error:
        i40iw_puda_dele_resources(vsi, info->type, false);

        return ret;
}

/**
 * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 * @qp: ilq's qp resource
 * @wqe_idx: wqe index of completed rcvbuf
 */
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
{
        u64 *wqe;
        u64 offset24;

        wqe = qp->qp_uk.rq_base[wqe_idx].elem;
        get_64bit_val(wqe, 24, &offset24);
        offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
        set_64bit_val(wqe, 24, offset24);
}

/**
 * i40iw_ieq_get_fpdu_length - given length return fpdu length
 * @length: length of fpdu
 */
static u16 i40iw_ieq_get_fpdu_length(u16 length)
{
        u16 fpdu_len;

        fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
        fpdu_len = (fpdu_len + 3) & 0xfffffffc;
        return fpdu_len;
}

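/*
 * Worked example: for an MPA payload length of 100 bytes, and assuming
 * I40IW_IEQ_MPA_FRAMING covers the 2-byte MPA length field plus the
 * 4-byte trailing CRC (6 bytes total), fpdu_len becomes 106 and is then
 * rounded up to the next 4-byte boundary by the mask, giving 108.
 */
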
/**
 * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: at offset in tx buf to copy
 * @length: length of data to copy
 */
static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
                                    struct i40iw_puda_buf *txbuf,
                                    u16 buf_offset, u32 txbuf_offset,
                                    u32 length)
{
        void *mem1 = (u8 *)buf->mem.va + buf_offset;
        void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;

        memcpy(mem2, mem1, length);
}

/**
 * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 * @buf: receive buffer with partial
 * @txbuf: buffer to prepare
 */
static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
                                   struct i40iw_puda_buf *txbuf)
{
        txbuf->maclen = buf->maclen;
        txbuf->tcphlen = buf->tcphlen;
        txbuf->ipv4 = buf->ipv4;
        txbuf->hdrlen = buf->hdrlen;
        i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
}

/**
 * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
 * @buf: receive exception buffer
 * @fps: first partial sequence number
 */
static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
{
        u32 offset;

        if (buf->seqnum < fps) {
                offset = fps - buf->seqnum;
                if (offset > buf->datalen)
                        return;
                buf->data += offset;
                buf->datalen -= (u16)offset;
                buf->seqnum = fps;
        }
}

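/*
 * Bytes that precede the first partial sequence number (fps) were already
 * delivered in order by the hardware, so the buffer is trimmed to start
 * exactly at fps; a buffer lying entirely before fps (offset > datalen)
 * is left untouched.
 */
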
/**
 * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's received buffer list
 * @pbufl: temporary list for buffers for fpdu
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */
static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
                                  struct list_head *rxlist,
                                  struct list_head *pbufl,
                                  struct i40iw_puda_buf *txbuf,
                                  u16 fpdu_len)
{
        struct i40iw_puda_buf *buf;
        u32 nextseqnum;
        u16 txoffset, bufoffset;

        buf = i40iw_puda_get_listbuf(pbufl);
        if (!buf)
                return;
        nextseqnum = buf->seqnum + fpdu_len;
        txbuf->totallen = buf->hdrlen + fpdu_len;
        txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
        i40iw_ieq_setup_tx_buf(buf, txbuf);

        txoffset = buf->hdrlen;
        bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);

        do {
                if (buf->datalen >= fpdu_len) {
                        /* copied full fpdu */
                        i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
                        buf->datalen -= fpdu_len;
                        buf->data += fpdu_len;
                        buf->seqnum = nextseqnum;
                        break;
                }
                /* copy partial fpdu */
                i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
                txoffset += buf->datalen;
                fpdu_len -= buf->datalen;
                i40iw_puda_ret_bufpool(ieq, buf);
                buf = i40iw_puda_get_listbuf(pbufl);
                if (!buf)
                        return;
                bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
        } while (1);

        /* last buffer on the list*/
        if (buf->datalen)
                list_add(&buf->list, rxlist);
        else
                i40iw_puda_ret_bufpool(ieq, buf);
}

/**
 * i40iw_ieq_create_pbufl - create buffer list for single fpdu
 * @pfpdu: partial management per user qp
 * @rxlist: resource list for receive ieq buffers
 * @pbufl: temp. list for buffers for fpdu
 * @buf: first receive buffer
 * @fpdu_len: total length of fpdu
 */
static enum i40iw_status_code i40iw_ieq_create_pbufl(
                struct i40iw_pfpdu *pfpdu,
                struct list_head *rxlist,
                struct list_head *pbufl,
                struct i40iw_puda_buf *buf,
                u16 fpdu_len)
{
        enum i40iw_status_code status = 0;
        struct i40iw_puda_buf *nextbuf;
        u32 nextseqnum;
        u16 plen = fpdu_len - buf->datalen;
        bool done = false;

        nextseqnum = buf->seqnum + buf->datalen;
        do {
                nextbuf = i40iw_puda_get_listbuf(rxlist);
                if (!nextbuf) {
                        status = I40IW_ERR_list_empty;
                        break;
                }
                list_add_tail(&nextbuf->list, pbufl);
                if (nextbuf->seqnum != nextseqnum) {
                        pfpdu->bad_seq_num++;
                        status = I40IW_ERR_SEQ_NUM;
                        break;
                }
                if (nextbuf->datalen >= plen) {
                        done = true;
                } else {
                        plen -= nextbuf->datalen;
                        nextseqnum = nextbuf->seqnum + nextbuf->datalen;
                }
        } while (!done);

        return status;
}

/**
 * i40iw_ieq_handle_partial - process partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */
static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
                                                       struct i40iw_pfpdu *pfpdu,
                                                       struct i40iw_puda_buf *buf,
                                                       u16 fpdu_len)
{
        enum i40iw_status_code status = 0;
        u8 *crcptr;
        u32 mpacrc;
        u32 seqnum = buf->seqnum;
        struct list_head pbufl; /* partial buffer list */
        struct i40iw_puda_buf *txbuf = NULL;
        struct list_head *rxlist = &pfpdu->rxlist;

        INIT_LIST_HEAD(&pbufl);
        list_add(&buf->list, &pbufl);

        status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
        if (status)
                goto error;

        txbuf = i40iw_puda_get_bufpool(ieq);
        if (!txbuf) {
                pfpdu->no_tx_bufs++;
                status = I40IW_ERR_NO_TXBUFS;
                goto error;
        }

        i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
        i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
        crcptr = txbuf->data + fpdu_len - 4;
        mpacrc = *(u32 *)crcptr;
        if (ieq->check_crc) {
                status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
                                                (fpdu_len - 4), mpacrc);
                if (status) {
                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
                                    "%s: error bad crc\n", __func__);
                        goto error;
                }
        }

        i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
                        txbuf->mem.va, txbuf->totallen);
        i40iw_puda_send_buf(ieq, txbuf);
        pfpdu->rcv_nxt = seqnum + fpdu_len;
        return status;

error:
        while (!list_empty(&pbufl)) {
                buf = (struct i40iw_puda_buf *)(pbufl.prev);
                list_del(&buf->list);
                list_add(&buf->list, rxlist);
        }
        if (txbuf)
                i40iw_puda_ret_bufpool(ieq, txbuf);
        return status;
}

/**
 * i40iw_ieq_process_buf - process buffer rcvd for ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */
static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
                                                    struct i40iw_pfpdu *pfpdu,
                                                    struct i40iw_puda_buf *buf)
{
        u16 fpdu_len = 0;
        u16 datalen = buf->datalen;
        u8 *datap = buf->data;
        u8 *crcptr;
        u16 ioffset = 0;
        u32 mpacrc;
        u32 seqnum = buf->seqnum;
        u16 length = 0;
        u16 full = 0;
        bool partial = false;
        struct i40iw_puda_buf *txbuf;
        struct list_head *rxlist = &pfpdu->rxlist;
        enum i40iw_status_code ret = 0;
        enum i40iw_status_code status = 0;

        ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
        while (datalen) {
                fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
                if (fpdu_len > pfpdu->max_fpdu_data) {
                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
                                    "%s: error bad fpdu_len\n", __func__);
                        status = I40IW_ERR_MPA_CRC;
                        list_add(&buf->list, rxlist);
                        return status;
                }

                if (datalen < fpdu_len) {
                        partial = true;
                        break;
                }
                crcptr = datap + fpdu_len - 4;
                mpacrc = *(u32 *)crcptr;
                if (ieq->check_crc)
                        ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
                                                     datap, fpdu_len - 4, mpacrc);
                if (ret) {
                        status = I40IW_ERR_MPA_CRC;
                        list_add(&buf->list, rxlist);
                        return status;
                }
                full++;
                pfpdu->fpdu_processed++;
                datap += fpdu_len;
                length += fpdu_len;
                datalen -= fpdu_len;
        }
        if (full) {
                /* copy full pdu's in the txbuf and send them out */
                txbuf = i40iw_puda_get_bufpool(ieq);
                if (!txbuf) {
                        pfpdu->no_tx_bufs++;
                        status = I40IW_ERR_NO_TXBUFS;
                        list_add(&buf->list, rxlist);
                        return status;
                }
                /* modify txbuf's buffer header */
                i40iw_ieq_setup_tx_buf(buf, txbuf);
                /* copy full fpdu's to new buffer */
                i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
                                        length);
                txbuf->totallen = buf->hdrlen + length;

                i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
                i40iw_puda_send_buf(ieq, txbuf);

                if (!datalen) {
                        pfpdu->rcv_nxt = buf->seqnum + length;
                        i40iw_puda_ret_bufpool(ieq, buf);
                        return status;
                }
                buf->data = datap;
                buf->seqnum = seqnum + length;
                buf->datalen = datalen;
                pfpdu->rcv_nxt = buf->seqnum;
        }
        if (partial)
                status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);

        return status;
}

/**
 * i40iw_ieq_process_fpdus - process fpdu's buffers on its list
 * @qp: qp for which partial fpdus
 * @ieq: ieq resource
 */
static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
                                    struct i40iw_puda_rsrc *ieq)
{
        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
        struct list_head *rxlist = &pfpdu->rxlist;
        struct i40iw_puda_buf *buf;
        enum i40iw_status_code status;

        do {
                if (list_empty(rxlist))
                        break;
                buf = i40iw_puda_get_listbuf(rxlist);
                if (!buf) {
                        i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
                                    "%s: error no buf\n", __func__);
                        break;
                }
                if (buf->seqnum != pfpdu->rcv_nxt) {
                        /* This could be out of order or missing packet */
                        pfpdu->out_of_order++;
                        list_add(&buf->list, rxlist);
                        break;
                }
                /* keep processing buffers from the head of the list */
                status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
                if (status == I40IW_ERR_MPA_CRC) {
                        pfpdu->mpa_crc_err = true;
                        while (!list_empty(rxlist)) {
                                buf = i40iw_puda_get_listbuf(rxlist);
                                i40iw_puda_ret_bufpool(ieq, buf);
                        }
                        /* create CQP for AE */
                        i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
                }
        } while (!status);
}

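/*
 * FPDUs are consumed strictly in sequence: a buffer whose seqnum does not
 * match rcv_nxt is put back at the head of rxlist and processing stops
 * until the missing segment arrives, while an MPA CRC error flushes the
 * list and raises an asynchronous event through the CQP.
 */
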
/**
 * i40iw_ieq_handle_exception - handle qp's exception
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */
static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
                                       struct i40iw_sc_qp *qp,
                                       struct i40iw_puda_buf *buf)
{
        struct i40iw_puda_buf *tmpbuf = NULL;
        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
        u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
        u32 rcv_wnd = hw_host_ctx[23];
        /* first partial seq # in q2 */
        u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
        struct list_head *rxlist = &pfpdu->rxlist;
        struct list_head *plist;

        pfpdu->total_ieq_bufs++;

        if (pfpdu->mpa_crc_err) {
                pfpdu->crc_err++;
                goto error;
        }
        if (pfpdu->mode && (fps != pfpdu->fps)) {
                /* clean up qp as it is new partial sequence */
                i40iw_ieq_cleanup_qp(ieq, qp);
                i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
                            "%s: restarting new partial\n", __func__);
                pfpdu->mode = false;
        }

        if (!pfpdu->mode) {
                i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
                /* First_Partial_Sequence_Number check */
                pfpdu->rcv_nxt = fps;
                pfpdu->fps = fps;
                pfpdu->mode = true;
                pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
                                       (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
                pfpdu->pmode_count++;
                INIT_LIST_HEAD(rxlist);
                i40iw_ieq_check_first_buf(buf, fps);
        }

        if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
                pfpdu->bad_seq_num++;
                goto error;
        }

        if (!list_empty(rxlist)) {
                tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
                while ((struct list_head *)tmpbuf != rxlist) {
                        if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
                                break;
                        plist = &tmpbuf->list;
                        tmpbuf = (struct i40iw_puda_buf *)plist->next;
                }
                /* Insert buf before tmpbuf */
                list_add_tail(&buf->list, &tmpbuf->list);
        } else {
                list_add_tail(&buf->list, rxlist);
        }
        i40iw_ieq_process_fpdus(qp, ieq);
        return;
error:
        i40iw_puda_ret_bufpool(ieq, buf);
}

/**
 * i40iw_ieq_receive - received exception buffer
 * @vsi: pointer to the vsi structure
 * @buf: exception buffer received
 */
static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
                              struct i40iw_puda_buf *buf)
{
        struct i40iw_puda_rsrc *ieq = vsi->ieq;
        struct i40iw_sc_qp *qp = NULL;
        u32 wqe_idx = ieq->compl_rxwqe_idx;

        qp = i40iw_ieq_get_qp(vsi->dev, buf);
        if (!qp) {
                ieq->stats_bad_qp_id++;
                i40iw_puda_ret_bufpool(ieq, buf);
        } else {
                i40iw_ieq_handle_exception(ieq, qp, buf);
        }
        /*
         * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
         * on which wqe_idx to start replenish rq
         */
        if (!ieq->rxq_invalid_cnt)
                ieq->rx_wqe_idx = wqe_idx;
        ieq->rxq_invalid_cnt++;
}

/**
 * i40iw_ieq_tx_compl - put back after sending completed exception buffer
 * @vsi: pointer to the vsi structure
 * @sqwrid: pointer to puda buffer
 */
static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
{
        struct i40iw_puda_rsrc *ieq = vsi->ieq;
        struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;

        i40iw_puda_ret_bufpool(ieq, buf);
}

/**
 * i40iw_ieq_cleanup_qp - qp is being destroyed
 * @ieq: ieq resource
 * @qp: all pending fpdu buffers
 */
void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
        struct i40iw_puda_buf *buf;
        struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
        struct list_head *rxlist = &pfpdu->rxlist;

        if (!pfpdu->mode)
                return;
        while (!list_empty(rxlist)) {
                buf = i40iw_puda_get_listbuf(rxlist);
                i40iw_puda_ret_bufpool(ieq, buf);
        }
}