/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_virtchnl.h"

/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
	wmb();			/* make sure WQE is populated before polarity is set */
	set_64bit_val(wqe, 24, header);
}

/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
					  u32 *val,
					  u32 *tail,
					  u32 *error)
{
	if (cqp->dev->is_pf) {
		*val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
		*tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
	} else {
		*val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
		*tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
		*error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
	}
}

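/*
 * Illustrative note (not part of the original driver): the RS_32/RS_64 and
 * LS_32/LS_64 macros used throughout this file are shift-and-mask field
 * accessors. For a field FOO defined by FOO_SHIFT and FOO_MASK in the driver
 * headers, they behave roughly like:
 *
 *	#define RS_32(val, FOO)	((u32)(((val) & FOO##_MASK) >> FOO##_SHIFT))
 *	#define LS_64(val, FOO)	(((u64)(val) << FOO##_SHIFT) & FOO##_MASK)
 *
 * so *tail above is just the WQTAIL bit-field extracted from the CQPTAIL
 * register value. The expansion shown here is an approximation for readers;
 * the authoritative definitions live in the driver headers.
 */
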
/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
					struct i40iw_sc_cqp *cqp,
					u32 tail,
					u32 count)
{
	u32 i = 0;
	u32 newtail, error, val;

	while (i < count) {
		i++;
		i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
		if (error) {
			error = (cqp->dev->is_pf) ?
				 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
				 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			return I40IW_ERR_CQP_COMPL_ERROR;
		}
		if (newtail != tail) {
			/* SUCCESS */
			I40IW_RING_MOVE_TAIL(cqp->sq_ring);
			return 0;
		}
		udelay(I40IW_SLEEP_COUNT);
	}
	return I40IW_ERR_TIMEOUT;
}

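/*
 * Usage sketch (illustrative, not from the driver): callers that bypass the
 * control CQ pair a posted WQE with a register poll by snapshotting the tail
 * first, e.g.:
 *
 *	u32 val, tail, error;
 *
 *	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
 *	if (error)
 *		return I40IW_ERR_CQP_COMPL_ERROR;
 *	i40iw_sc_cqp_post_sq(cqp);
 *	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
 *
 * Completion is detected when hardware advances the tail past the snapshot.
 */
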
/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copies base values
 * of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
				u64 *buf,
				struct i40iw_hmc_obj_info *info,
				u32 *sd)
{
	u64 size;
	u32 i;
	u64 temp;
	u32 k = 0;
	u64 base = 0;
	u32 j;

	/* copy base values in obj_info */
	for (i = I40IW_HMC_IW_QP, j = 0;
			i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		info[i].base = RS_64_1(temp, 32) * 512;
		if (info[i].base > base) {
			base = info[i].base;
			k = i;
		}
	}
	size = info[k].cnt * info[k].size + info[k].base;
	if (size & 0x1FFFFF)
		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
	else
		*sd = (u32)(size >> 21);

	return 0;
}

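/*
 * Worked example (illustrative): an SD (segment descriptor) backs 2MB of HMC
 * address space, hence the shift by 21. If the highest-based object ends at
 * size = 0x650000 (~6.3MB), then size >> 21 == 3 with a nonzero remainder,
 * so *sd = 3 + 1 = 4 segment descriptors are reported.
 */
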
/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copies max_cnt and
 * size values of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
				u64 *buf,
				struct i40iw_hmc_info *hmc_info,
				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
	u64 temp;
	struct i40iw_hmc_obj_info *obj_info;
	u32 i, j, size;
	u16 max_pe_sds;

	obj_info = hmc_info->hmc_obj;

	get_64bit_val(buf, 0, &temp);
	hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
	max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

	/* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
	if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
		max_pe_sds--;
	hmc_fpm_misc->max_sds = max_pe_sds;
	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

	for (i = I40IW_HMC_IW_QP, j = 8;
	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		if (i == I40IW_HMC_IW_QP)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
		else if (i == I40IW_HMC_IW_CQ)
			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
		else
			obj_info[i].max_cnt = (u32)temp;

		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = ((u64)1 << size);
	}
	for (i = I40IW_HMC_IW_MR, j = 48;
	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		get_64bit_val(buf, j, &temp);
		obj_info[i].max_cnt = (u32)temp;
		size = (u32)RS_64_1(temp, 32);
		obj_info[i].size = LS_64_1(1, size);
	}

	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
	get_64bit_val(buf, 64, &temp);
	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
	if (!hmc_fpm_misc->xf_block_size)
		return I40IW_ERR_INVALID_SIZE;
	get_64bit_val(buf, 80, &temp);
	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
	if (!hmc_fpm_misc->q1_block_size)
		return I40IW_ERR_INVALID_SIZE;
	return 0;
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
			     struct i40iw_sc_pd *pd,
			     u16 pd_id)
{
	pd->size = sizeof(*pd);
	pd->pd_id = pd_id;
	pd->dev = dev;
}

/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
 * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
	u8 encoded_size = 0;

	/* cqp sq's hw coded value starts from 1 for size of 4
	 * while it starts from 0 for qp' wq's.
	 */
	if (cqpsq)
		encoded_size = 1;
	wqsize >>= 2;
	while (wqsize >>= 1)
		encoded_size++;
	return encoded_size;
}

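/*
 * Worked example (illustrative): for a CQP SQ of 2048 entries, cqpsq starts
 * encoded_size at 1; 2048 >> 2 == 512, and 512 halves nine times before
 * reaching zero, so the function returns 1 + 9 == 10. For a QP WQ of the
 * same depth the result would be 9, since its encoding starts from 0.
 */
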
/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
						struct i40iw_cqp_init_info *info)
{
	u8 hw_sq_size;

	if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
	    (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
	    ((info->sq_size & (info->sq_size - 1))))
		return I40IW_ERR_INVALID_SIZE;

	hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
	cqp->size = sizeof(*cqp);
	cqp->sq_size = info->sq_size;
	cqp->hw_sq_size = hw_sq_size;
	cqp->sq_base = info->sq;
	cqp->host_ctx = info->host_ctx;
	cqp->sq_pa = info->sq_pa;
	cqp->host_ctx_pa = info->host_ctx_pa;
	cqp->dev = info->dev;
	cqp->struct_ver = info->struct_ver;
	cqp->scratch_array = info->scratch_array;
	cqp->polarity = 0;
	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
	cqp->enabled_vf_count = info->enabled_vf_count;
	cqp->hmc_profile = info->hmc_profile;
	info->dev->cqp = cqp;

	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
		    __func__, cqp->sq_size, cqp->hw_sq_size,
		    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
	return 0;
}

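/*
 * Usage sketch (illustrative, not from the driver): bringup fills an
 * i40iw_cqp_init_info with the SQ and host-context buffers and then pairs
 * init with create. Buffer names below are hypothetical placeholders:
 *
 *	struct i40iw_cqp_init_info info = {};
 *	u16 maj_err, min_err;
 *
 *	info.dev = dev;
 *	info.sq_size = 128;		// power of two within SW limits
 *	info.sq = cqp_sq_va;		// DMA-coherent SQ buffer
 *	info.sq_pa = cqp_sq_pa;
 *	info.host_ctx = ctx_va;
 *	info.host_ctx_pa = ctx_pa;
 *	if (!i40iw_sc_cqp_init(cqp, &info))
 *		i40iw_sc_cqp_create(cqp, false, &maj_err, &min_err);
 */
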
/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @disable_pfpdus: if pfpdu to be disabled
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
						  bool disable_pfpdus,
						  u16 *maj_err,
						  u16 *min_err)
{
	u64 temp;
	u32 cnt = 0, p1, p2, val = 0, err_code;
	enum i40iw_status_code ret_code;

	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
					  &cqp->sdbuf,
					  128,
					  I40IW_SD_BUF_ALIGNMENT);
	if (ret_code)
		goto exit;

	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

	if (disable_pfpdus)
		temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);

	set_64bit_val(cqp->host_ctx, 0, temp);
	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
	       LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
	set_64bit_val(cqp->host_ctx, 16, temp);
	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
	set_64bit_val(cqp->host_ctx, 32, 0);
	set_64bit_val(cqp->host_ctx, 40, 0);
	set_64bit_val(cqp->host_ctx, 48, 0);
	set_64bit_val(cqp->host_ctx, 56, 0);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
			cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

	p1 = RS_32_1(cqp->host_ctx_pa, 32);
	p2 = (u32)cqp->host_ctx_pa;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
	}
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
			ret_code = I40IW_ERR_TIMEOUT;
			/*
			 * read PFPE_CQPERRORCODES register to get the minor
			 * and major error code
			 */
			if (cqp->dev->is_pf)
				err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
			else
				err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
			*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
			goto exit;
		}
		udelay(I40IW_SLEEP_COUNT);
		if (cqp->dev->is_pf)
			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
		else
			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
	} while (!val);

exit:
	if (!ret_code)
		cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return ret_code;
}

/**
 * i40iw_sc_cqp_post_sq - post of cqp's sq
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
	if (cqp->dev->is_pf)
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
	else
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

	i40iw_debug(cqp->dev,
		    I40IW_DEBUG_WQE,
		    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
		    __func__,
		    cqp->sq_ring.head,
		    cqp->sq_ring.tail,
		    cqp->sq_ring.size);
}

/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
	u64 *wqe = NULL;
	u32 wqe_idx;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
		i40iw_debug(cqp->dev,
			    I40IW_DEBUG_WQE,
			    "%s: ring is full head %x tail %x size %x\n",
			    __func__,
			    cqp->sq_ring.head,
			    cqp->sq_ring.tail,
			    cqp->sq_ring.size);
		return NULL;
	}
	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
	if (ret_code)
		return NULL;
	if (!wqe_idx)
		cqp->polarity = !cqp->polarity;

	wqe = cqp->sq_base[wqe_idx].elem;
	cqp->scratch_array[wqe_idx] = scratch;
	I40IW_CQP_INIT_WQE(wqe);

	return wqe;
}

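/*
 * Pattern note with sketch (illustrative): every CQP command below follows
 * the same sequence built on this helper:
 *
 *	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
 *	if (!wqe)
 *		return I40IW_ERR_RING_FULL;
 *	set_64bit_val(wqe, ..., ...);		// command payload
 *	header = LS_64(opcode, I40IW_CQPSQ_OPCODE) |
 *		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 *	i40iw_insert_wqe_hdr(wqe, header);	// wmb() then valid bit
 *	if (post_sq)
 *		i40iw_sc_cqp_post_sq(cqp);	// ring the doorbell
 *
 * The valid (polarity) bit is written last so hardware never sees a
 * half-built WQE.
 */
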
/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
	u32 cnt = 0, val = 1;
	enum i40iw_status_code ret_code = 0;
	u32 cqpstat_addr;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
		cqpstat_addr = I40E_PFPE_CCQPSTATUS;
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
		cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
	}
	while (val) {
		if (cnt++ > I40IW_DONE_COUNT) {
			ret_code = I40IW_ERR_TIMEOUT;
			break;
		}
		udelay(I40IW_SLEEP_COUNT);
		val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
	}

	i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
	return ret_code;
}

/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_seq_num;

	/* write to cq doorbell shadow area */
	/* arm next se should always be zero */
	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

	wmb();       /* make sure shadow area is updated before arming */

	if (ccq->dev->is_pf)
		i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
	else
		i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}

/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
					struct i40iw_sc_cq *ccq,
					struct i40iw_ccq_cqe_info *info)
{
	u64 qp_ctx, temp, temp1;
	u64 *cqe;
	struct i40iw_sc_cqp *cqp;
	u32 wqe_idx;
	u8 polarity;
	enum i40iw_status_code ret_code = 0;

	if (ccq->cq_uk.avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

	get_64bit_val(cqe, 24, &temp);
	polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
	if (polarity != ccq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	get_64bit_val(cqe, 8, &qp_ctx);
	cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
	info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
	info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	if (info->error) {
		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
		info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	}
	wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
	info->scratch = cqp->scratch_array[wqe_idx];

	get_64bit_val(cqe, 16, &temp1);
	info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
	info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);

	/* move the head for cq */
	I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
		ccq->cq_uk.polarity ^= 1;

	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
	set_64bit_val(ccq->cq_uk.shadow_area,
		      0,
		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
	wmb(); /* write shadow area before tail */
	I40IW_RING_MOVE_TAIL(cqp->sq_ring);

	return ret_code;
}

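/*
 * Note with sketch (illustrative): the CQ uses a polarity (valid) bit
 * instead of head/tail registers to detect new entries. The consumer keeps
 * its own expected polarity and flips it on every ring wrap, e.g.:
 *
 *	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
 *		ccq->cq_uk.polarity ^= 1;
 *
 * An entry whose valid bit does not match the expected polarity is stale,
 * which is why the function above returns I40IW_ERR_QUEUE_EMPTY on a
 * mismatch.
 */
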
/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
					struct i40iw_sc_cqp *cqp,
					u8 op_code,
					struct i40iw_ccq_cqe_info *compl_info)
{
	struct i40iw_ccq_cqe_info info;
	struct i40iw_sc_cq *ccq;
	enum i40iw_status_code ret_code = 0;
	u32 cnt = 0;

	memset(&info, 0, sizeof(info));
	ccq = cqp->dev->ccq;
	while (1) {
		if (cnt++ > I40IW_DONE_COUNT)
			return I40IW_ERR_TIMEOUT;

		if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
			udelay(I40IW_SLEEP_COUNT);
			continue;
		}

		if (info.error) {
			ret_code = I40IW_ERR_CQP_COMPL_ERROR;
			break;
		}
		/* check if opcode is cq create */
		if (op_code != info.op_code) {
			i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
				    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
				    __func__, op_code, info.op_code);
		}
		/* success, exit out of the loop */
		if (op_code == info.op_code)
			break;
	}

	if (compl_info)
		memcpy(compl_info, &info, sizeof(*compl_info));

	return ret_code;
}

/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_cqp_manage_push_page_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
		return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->qs_handle);

	header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: function number
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 vf_index,
				bool free_pm_fcn,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	if (vf_index >= I40IW_MAX_VF_PER_PF)
		return I40IW_ERR_INVALID_VF_ID;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 hmc_profile_type,
				u8 vf_num, bool post_sq,
				bool poll_registers)
{
	u64 *wqe;
	u64 header;
	u32 val, tail, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16,
		      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
				LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

	header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (poll_registers)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
		else
			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
								 NULL);
	}

	return ret_code;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}

/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp cqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
			struct i40iw_sc_cqp *cqp,
			u64 scratch,
			u8 hmc_fn_id,
			struct i40iw_dma_mem *commit_fpm_mem,
			bool post_sq,
			u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, commit_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);

		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
	}

	return ret_code;
}

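/*
 * Usage note with sketch (illustrative): wait_type selects the completion
 * mechanism. Before the control CQ exists (early bringup), callers must use
 * register polling, e.g.:
 *
 *	ret = i40iw_sc_commit_fpm_values(cqp, 0, hmc_fn_id, &commit_mem,
 *					 true, I40IW_CQP_WAIT_POLL_REGS);
 *
 * Once the CCQ is created, I40IW_CQP_WAIT_POLL_CQ lets the same command be
 * completed through the control CQ instead. (The argument values shown here
 * are hypothetical.)
 */
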
/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
			struct i40iw_sc_cqp *cqp,
			u64 scratch,
			u8 hmc_fn_id,
			struct i40iw_dma_mem *query_fpm_mem,
			bool post_sq,
			u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, query_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* read the tail from CQP_TAIL register */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_query_fpm_values_done(cqp);
	}

	return ret_code;
}

/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_add_arp_cache_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, info->reach_max);

	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 16, temp);

	header = info->arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
		 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

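/*
 * Worked example (illustrative): the mac_addr packing above places byte 0
 * in the most significant position of a 48-bit field. For MAC
 * 00:1e:67:aa:bb:cc the resulting temp is 0x00001e67aabbcc, i.e.
 *
 *	temp = 0xcc | (0xbbULL << 8) | (0xaaULL << 16) |
 *	       (0x67ULL << 24) | (0x1eULL << 32) | (0x00ULL << 40);
 */
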
/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u16 arp_index,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to query arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u16 arp_index,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_apbvt_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->port);

	header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
 *
 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_qhash_table_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	if (info->ipv4_valid) {
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_local_mac_ipaddr_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 32, temp);

	header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 entry_idx,
					u8 ignore_ref_count,
					bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cqp_nop - send a nop wqe
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
					       u64 scratch,
					       bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
						struct i40iw_ceq_init_info *info)
{
	u32 pble_obj_cnt;

	if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
	    (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
		return I40IW_ERR_INVALID_SIZE;

	if (info->ceq_id >= I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	ceq->size = sizeof(*ceq);
	ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
	ceq->ceq_id = info->ceq_id;
	ceq->dev = info->dev;
	ceq->elem_cnt = info->elem_cnt;
	ceq->ceq_elem_pa = info->ceqe_pa;
	ceq->virtual_map = info->virtual_map;

	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

	ceq->tph_en = info->tph_en;
	ceq->tph_val = info->tph_val;
	ceq->polarity = 1;
	I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
	ceq->dev->ceq[info->ceq_id] = ceq;

	return 0;
}

/**
 * i40iw_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
	set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ceq->dev->cqp;
	cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
}

/**
 * i40iw_sc_cceq_create - create cceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
{
	enum i40iw_status_code ret_code;

	ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
	if (!ret_code)
		ret_code = i40iw_sc_cceq_create_done(ceq);
	return ret_code;
}

/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	if (polarity != ceq->polarity)
		return cq;

	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;

	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}

/**
 * i40iw_sc_aeq_init - initialize aeq
 * @aeq: aeq structure ptr
 * @info: aeq initialization info
 */
static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
						struct i40iw_aeq_init_info *info)
{
	u32 pble_obj_cnt;

	if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
	    (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
		return I40IW_ERR_INVALID_SIZE;
	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	aeq->size = sizeof(*aeq);
	aeq->polarity = 1;
	aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
	aeq->dev = info->dev;
	aeq->elem_cnt = info->elem_cnt;

	aeq->aeq_elem_pa = info->aeq_elem_pa;
	I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);

	aeq->virtual_map = info->virtual_map;
	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
	info->dev->aeq = aeq;

	return 0;
}

/**
 * i40iw_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
						  u64 scratch,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	set_64bit_val(wqe, 48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
						   u64 scratch,
						   bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	default:
		break;
	}
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;
	return 0;
}

/**
 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
 * @dev: sc device struct
 * @count: allocate count
 */
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
							  u32 count)
{
	if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
		return I40IW_ERR_INVALID_SIZE;

	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);

	return 0;
}

/**
 * i40iw_sc_aeq_create_done - poll for aeq create to complete
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = aeq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
}

/**
 * i40iw_sc_aeq_destroy_done - poll for destroy of aeq during close
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = aeq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
}

/**
 * i40iw_sc_ccq_init - initialize control cq
 * @cq: sc's cq struct
 * @info: info for control cq initialization
 */
static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
						struct i40iw_ccq_init_info *info)
{
	u32 pble_obj_cnt;

	if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
		return I40IW_ERR_INVALID_SIZE;

	if (info->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cq->cq_pa = info->cq_pa;
	cq->cq_uk.cq_base = info->cq_base;
	cq->shadow_area_pa = info->shadow_area_pa;
	cq->cq_uk.shadow_area = info->shadow_area;
	cq->shadow_read_threshold = info->shadow_read_threshold;
	cq->dev = info->dev;
	cq->ceq_id = info->ceq_id;
	cq->cq_uk.cq_size = info->num_elem;
	cq->cq_type = I40IW_CQ_TYPE_CQP;
	cq->ceqe_mask = info->ceqe_mask;
	I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);

	cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
	cq->ceq_id_valid = info->ceq_id_valid;
	cq->tph_en = info->tph_en;
	cq->tph_val = info->tph_val;
	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;

	cq->pbl_list = info->pbl_list;
	cq->virtual_map = info->virtual_map;
	cq->pbl_chunk_size = info->pbl_chunk_size;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
	cq->cq_uk.polarity = true;

	/* following are only for iw cqs so initialize them to zero */
	cq->cq_uk.cqe_alloc_reg = NULL;
	info->dev->ccq = cq;
	return 0;
}

/**
 * i40iw_sc_ccq_create_done - poll cqp for ccq create
 * @ccq: ccq sc struct
 */
static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = ccq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
}

/**
 * i40iw_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overflow flag for ccq
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
						  u64 scratch,
						  bool check_overflow,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_status_code ret_code;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
	set_64bit_val(wqe, 48,
		      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56,
		      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_sc_ccq_create_done(ccq);
		if (ret_code)
			return ret_code;
	}
	cqp->process_cqp_sds = i40iw_cqp_sds_cmd;

	return 0;
}

/**
 * i40iw_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	enum i40iw_status_code ret_code = 0;
	u32 tail, val, error;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
	}
	return ret_code;
}

/**
 * i40iw_sc_cq_init - initialize completion q
 * @cq: cq struct
 * @info: cq initialization info
 */
static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
					       struct i40iw_cq_init_info *info)
{
	u32 __iomem *cqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u32 arm_offset;

	pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cq->cq_pa = info->cq_base_pa;
	cq->dev = info->dev;
	cq->ceq_id = info->ceq_id;
	arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
	if (i40iw_get_hw_addr(cq->dev))
		cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
					     arm_offset);
	info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
	ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
	if (ret_code)
		return ret_code;
	cq->virtual_map = info->virtual_map;
	cq->pbl_chunk_size = info->pbl_chunk_size;
	cq->ceqe_mask = info->ceqe_mask;
	cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;

	cq->shadow_area_pa = info->shadow_area_pa;
	cq->shadow_read_threshold = info->shadow_read_threshold;

	cq->ceq_id_valid = info->ceq_id_valid;
	cq->tph_en = info->tph_en;
	cq->tph_val = info->tph_val;

	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	return 0;
}

/**
 * i40iw_sc_cq_create - create completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
						 u64 scratch,
						 bool check_overflow,
						 bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
		return I40IW_ERR_INVALID_CQ_ID;

	if (cq->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));

	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));

	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cq_destroy - destroy completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 */
static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
					       struct i40iw_qp_init_info *info)
{
	u32 __iomem *wqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u8 wqe_size;
	u32 offset;

	qp->dev = info->pd->dev;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;

	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
	if (i40iw_get_hw_addr(qp->pd->dev))
		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						offset);

	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;
	qp->virtual_map = info->virtual_map;

	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	qp->llp_stream_handle = (void *)(-1);
	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;

	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
	ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
					       &wqe_size);
	if (ret_code)
		return ret_code;
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->pd->dev->qs_handle;
	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;

	return 0;
}
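/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the RQ sizing above scales the ring by the ratio of the chosen WQE size
 * to the 32-byte minimum before encoding it.  Assuming, for example, that
 * i40iw_fragcnt_to_wqesize_rq() returned wqe_size = 128 for a 256-entry RQ:
 *
 *	256 * (128 / I40IW_QP_WQE_MIN_SIZE)   // 256 * (128 / 32) = 1024
 *
 * i.e. the hardware ring is provisioned in minimum-size WQE quanta.
 */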
/**
 * i40iw_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_create(
				struct i40iw_sc_qp *qp,
				struct i40iw_create_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
	    (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
		return I40IW_ERR_INVALID_QP_ID;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);

	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_modify(
				struct i40iw_sc_qp *qp,
				struct i40iw_modify_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
		if (info->dont_send_fin)
			term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
		if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
		    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
			term_len = info->termlen;
	}

	set_64bit_val(wqe,
		      8,
		      LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
		 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_destroy(
					struct i40iw_sc_qp *qp,
					u64 scratch,
					bool remove_hash_idx,
					bool ignore_mw_bnd,
					bool post_sq)
{
	u64 *wqe;
	u64 header;
	struct i40iw_sc_cqp *cqp;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
		 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_flush_wqes - flush qp's wqe
 * @qp: sc qp
 * @info: flush information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
				struct i40iw_sc_qp *qp,
				struct i40iw_qp_flush_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 temp = 0;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	bool flush_sq = false, flush_rq = false;

	if (info->rq && !qp->flush_rq)
		flush_rq = true;

	if (info->sq && !qp->flush_sq)
		flush_sq = true;

	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;
	if (!flush_sq && !flush_rq) {
		if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
			return 0;
	}

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->userflushcode) {
		if (flush_rq) {
			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
		}
		if (flush_sq) {
			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
		}
	}
	set_64bit_val(wqe, 16, temp);

	temp = (info->generate_ae) ?
		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;

	set_64bit_val(wqe, 8, temp);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_upload_context - upload qp's context
 * @dev: sc device struct
 * @info: upload context info ptr for return
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_upload_context(
				struct i40iw_sc_dev *dev,
				struct i40iw_upload_context_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, info->buf_pa);

	header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
		 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
		 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
		 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 *
 * Note: the 64-bit context offsets that were lost in extraction below
 * (other than those preserved in the source fragments) are reconstructed
 * from the sequential qword layout of the QP context.
 */
static enum i40iw_status_code i40iw_sc_qp_setctx(
				struct i40iw_sc_qp *qp,
				u64 *qp_ctx,
				struct i40iw_qp_host_ctx_info *info)
{
	struct i40iwarp_offload_info *iw;
	struct i40iw_tcp_offload_info *tcp;
	u64 qw0, qw3, qw7 = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
	      LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
	      LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
	      LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
	      LS_64(info->push_idx, I40IWQPC_PPIDX) |
	      LS_64(info->push_mode_en, I40IWQPC_PMENA);

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
	      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
	      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);

	set_64bit_val(qp_ctx,
		      128,
		      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));

	set_64bit_val(qp_ctx,
		      136,
		      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
		      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx,
		      168,
		      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
	set_64bit_val(qp_ctx,
		      176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
		      LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));

	if (info->iwarp_info_valid) {
		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);

		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
		set_64bit_val(qp_ctx, 144, qp->q2_pa);
		set_64bit_val(qp_ctx,
			      152,
			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));

		/*
		 * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e. matching
		 * an advertisable IRD of 64
		 */
		iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
		set_64bit_val(qp_ctx,
			      160,
			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
			      LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
			      LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
			      LS_64(iw->rd_enable, I40IWQPC_RDOK) |
			      LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
			      LS_64(1, I40IWQPC_IWARPMODE) |
			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
			      LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
			      LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
			      LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
	}
	if (info->tcp_info_valid) {
		qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
		       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
		       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
		       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
		       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
		       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
		       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);

		qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
		       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
		       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
		       LS_64(tcp->tos, I40IWQPC_TOS) |
		       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
		       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);

		qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		set_64bit_val(qp_ctx,
			      32,
			      LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
			      LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));

		set_64bit_val(qp_ctx,
			      40,
			      LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
			      LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));

		set_64bit_val(qp_ctx,
			      48,
			      LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
			      LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
			      LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));

		qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
		       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
		       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
		       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
		       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
		       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
		       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);

		set_64bit_val(qp_ctx,
			      72,
			      LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
			      LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
		set_64bit_val(qp_ctx,
			      80,
			      LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
			      LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));

		set_64bit_val(qp_ctx,
			      88,
			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
			      LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
		set_64bit_val(qp_ctx,
			      96,
			      LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
			      LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
		set_64bit_val(qp_ctx,
			      104,
			      LS_64(tcp->srtt, I40IWQPC_SRTT) |
			      LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
		set_64bit_val(qp_ctx,
			      112,
			      LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
			      LS_64(tcp->cwnd, I40IWQPC_CWND));
		set_64bit_val(qp_ctx,
			      120,
			      LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
			      LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
		set_64bit_val(qp_ctx,
			      128,
			      LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
			      LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
		set_64bit_val(qp_ctx,
			      184,
			      LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
			      LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
		set_64bit_val(qp_ctx,
			      192,
			      LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
			      LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
	}

	set_64bit_val(qp_ctx, 0, qw0);
	set_64bit_val(qp_ctx, 24, qw3);
	set_64bit_val(qp_ctx, 56, qw7);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
			qp_ctx, I40IW_QP_CTX_SIZE);
	return 0;
}
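/*
 * Editor's note: qw0, qw3 and qw7 accumulate fields from the base, iWARP
 * and TCP sections above and are only stored at the very end (offsets 0,
 * 24 and 56), so each section can OR in its bits without re-reading the
 * context buffer.
 */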
/**
 * i40iw_sc_alloc_stag - mr stag alloc
 * @dev: sc device struct
 * @info: stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_stag(
				struct i40iw_sc_dev *dev,
				struct i40iw_allocate_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	set_64bit_val(wqe,
		      40,
		      LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
				struct i40iw_sc_dev *dev,
				struct i40iw_reg_ns_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	set_64bit_val(wqe,
		      8,
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	if (!info->chunk_size) {
		set_64bit_val(wqe, 32, info->reg_addr_pa);
		set_64bit_val(wqe, 48, 0);
	} else {
		set_64bit_val(wqe, 32, 0);
		set_64bit_val(wqe, 48, info->first_pm_pbl_index);
	}
	set_64bit_val(wqe, 40, info->hmc_fcn_index);
	set_64bit_val(wqe, 56, 0);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mr_reg_shared - register shared memory region
 * @dev: sc device struct
 * @info: info for shared memory registration
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_shared(
					struct i40iw_sc_dev *dev,
					struct i40iw_register_shared_stag *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 temp, va64, fbo, header;
	u32 va32;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	va64 = (uintptr_t)(info->va);
	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
	fbo = (u64)(va32 & (4096 - 1));

	set_64bit_val(wqe,
		      0,
		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
	set_64bit_val(wqe, 16, temp);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
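/*
 * Editor's note (illustrative sketch): the fbo ("first byte offset")
 * computed above is simply the virtual address's offset within a 4K page,
 * taken from the low 32 bits of the VA.  Worked example:
 *
 *	va   = 0x00007f3a12345678;
 *	va32 = 0x12345678;	// va & 0x00000000FFFFFFFF
 *	fbo  = 0x678;		// va32 & (4096 - 1)
 */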
/**
 * i40iw_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_dealloc_stag(
					struct i40iw_sc_dev *dev,
					struct i40iw_dealloc_stag_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_query_stag - query hardware for stag
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @stag_index: stag index for query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
						  u64 scratch,
						  u32 stag_index,
						  bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));

	header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @mw_stag_index: stag index
 * @pd_id: pd id for this mw
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mw_alloc(
					struct i40iw_sc_dev *dev,
					u64 scratch,
					u32 mw_stag_index,
					u16 pd_id,
					bool post_sq)
{
	u64 header;
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
 * @qp: sc qp struct
 * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 */
enum i40iw_status_code i40iw_sc_mr_fast_register(
				struct i40iw_sc_qp *qp,
				struct i40iw_fast_reg_stag_info *info,
				bool post_sq)
{
	u64 temp, header;
	u64 *wqe;
	u32 wqe_idx;

	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
					 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
		    __func__, info->wr_id, wqe_idx,
		    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
	set_64bit_val(wqe,
		      8,
		      LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
		      LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));

	set_64bit_val(wqe,
		      16,
		      info->total_len |
		      LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));

	header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
		 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
		 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
		 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
		 LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
		 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);

	if (post_sq)
		i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}
/**
 * i40iw_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 */
static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
			       void *lsmm_buf,
			       u32 size,
			       i40iw_stag stag)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);

	set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));

	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
/**
 * i40iw_sc_send_lsmm_nostag - for privilege qp
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 */
static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
				      void *lsmm_buf,
				      u32 size)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);

	set_64bit_val(wqe, 8, size);

	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
/**
 * i40iw_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: Do read0 or write0
 */
static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);
	if (read) {
		header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
			 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
		set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
	} else {
		header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
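/*
 * Editor's note: the 0x1234 remote stag and the 0xabcd value shifted into
 * the frag word above appear to be arbitrary placeholder values; a
 * read0/write0 RTR message carries no payload, so hardware only needs
 * syntactically valid fields to emit the zero-length RDMA operation that
 * completes MPA connection startup.
 */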
/**
 * i40iw_sc_post_wqe0 - send wqe with opcode
 * @qp: sc qp struct
 * @opcode: opcode to use for wqe0
 */
static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	switch (opcode) {
	case I40IWQP_OP_NOP:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	case I40IWQP_OP_RDMA_SEND:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
			 LS_64(1, I40IWQPSQ_STREAMMODE) |
			 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	default:
		i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
			    __func__);
		break;
	}
	return 0;
}
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		if (!vf_dev->fpm_query_buf) {
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/**
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	/* fill size of objects which are fixed */
	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

	return ret_code;
}
/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
		    (u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
							 hmc_info->hmc_obj,
							 &hmc_info->sd_table.sd_cnt);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
		data = sdbuf->pa;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}
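/*
 * Editor's note (illustrative sketch): at most three SD entries travel
 * inline in the WQE; any excess is staged in cqp->sdbuf and referenced by
 * physical address.  Each entry is 16 bytes, hence the (mem_entries << 4)
 * copy size.  For example, with info->cnt = 5:
 *
 *	wqe_entries = 3;		// entries [0..2] packed into the WQE
 *	mem_entries = 2;		// entries [3..4] copied to sdbuf
 *	memcpy size = 2 << 4 = 32;	// two 16-byte entries
 */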
/**
 * i40iw_update_pe_sds - cqp wqe for sd
 * @dev: ptr to i40iw_dev struct
 * @info: sd info for sd's
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
						  struct i40iw_update_sds_info *info,
						  u64 scratch)
{
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
	if (!ret_code)
		i40iw_sc_cqp_post_sq(cqp);

	return ret_code;
}
/**
 * i40iw_update_sds_noccq - update sd before ccq created
 * @dev: sc device struct
 * @info: sd info for sd's
 */
enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
					      struct i40iw_update_sds_info *info)
{
	u32 error, val, tail;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
	if (ret_code)
		return ret_code;
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	i40iw_sc_cqp_post_sq(cqp);
	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);

	return ret_code;
}
/**
 * i40iw_sc_suspend_qp - suspend qp for param change
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
					   struct i40iw_sc_qp *qp,
					   u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
		 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_resume_qp - resume qp after suspend
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
					  struct i40iw_sc_qp *qp,
					  u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));

	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
		 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					bool post_sq,
					bool poll_registers)
{
	u64 header;
	u64 *wqe;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));

	header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error) {
		ret_code = I40IW_ERR_CQP_COMPL_ERROR;
		return ret_code;
	}
	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (poll_registers)
			/* check for cqp sq tail update */
			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
		else
			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
								 NULL);
	}

	return ret_code;
}
/**
 * i40iw_ring_full - check if cqp ring is full
 * @cqp: struct for cqp hw
 */
static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
{
	return I40IW_RING_FULL_ERR(cqp->sq_ring);
}
/**
 * i40iw_est_sd - returns approximate number of SDs for HMC
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 */
static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
{
	int i;
	u64 size = 0;
	u64 sd;

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
		size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;

	if (dev->is_pf)
		size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;

	if (size & 0x1FFFFF)
		sd = (size >> 21) + 1; /* add 1 for remainder */
	else
		sd = size >> 21;

	if (!dev->is_pf) {
		/* 2MB alignment for VF PBLE HMC */
		size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
		if (size & 0x1FFFFF)
			sd += (size >> 21) + 1; /* add 1 for remainder */
		else
			sd += size >> 21;
	}

	return sd;
}
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u32 powerof2;
	u64 sd_needed;
	u32 loop_count = 0;

	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	sd_needed = i40iw_est_sd(dev, hmc_info);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: sd count %d where max sd is %d\n",
		    __func__, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
			qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;

		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		sd_needed = i40iw_est_sd(dev, hmc_info);
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		    (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER) {
				qpwanted -= FPM_MULTIPLIER;
				powerof2 = 1;
				while (powerof2 < qpwanted)
					powerof2 *= 2;
				powerof2 /= 2;
				qpwanted = powerof2;
			} else {
				qpwanted /= 2;
			}
		}
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	sd_needed = i40iw_est_sd(dev, hmc_info);

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}
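/*
 * Editor's note (illustrative sketch): when the back-off in the loop above
 * fires, qpwanted is rounded down to a power of two after subtracting
 * FPM_MULTIPLIER.  Assuming FPM_MULTIPLIER is 1024 and qpwanted = 3000:
 *
 *	qpwanted -= 1024;		// 1976
 *	powerof2: 1, 2, ..., 2048;	// first power of two >= 1976
 *	powerof2 /= 2;			// 1024
 *	qpwanted = 1024;
 */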
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
				pcmdinfo->post_sq);
		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);
		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.scratch);
		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}
/**
 * i40iw_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
					     struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}
/**
 * i40iw_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
{
	enum i40iw_status_code status = 0;
	struct cqp_commands_info *pcmdinfo;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);

		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
		if (status)
			break;
	}
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}
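/*
 * Editor's note: i40iw_process_cqp_cmd() and i40iw_process_bh() cooperate
 * under cqp_lock.  A command runs immediately only when nothing is already
 * queued and the CQP SQ has room; otherwise it is appended to cqp_cmd_head
 * and drained here, from the tasklet, once ring space frees up, which
 * preserves submission order.
 */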
/**
 * i40iw_iwarp_opcode - determine if incoming is rdma layer
 * @info: aeq info for the packet
 * @pkt: packet for error
 */
static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
{
	__be16 *mpa;
	u32 opcode = 0xffffffff;

	if (info->q2_data_written) {
		mpa = (__be16 *)pkt;
		opcode = ntohs(mpa[1]) & 0xf;
	}
	return opcode;
}
/**
 * i40iw_locate_mpa - return pointer to mpa in the pkt
 * @pkt: packet with data
 */
static u8 *i40iw_locate_mpa(u8 *pkt)
{
	/* skip over ethernet header */
	pkt += I40IW_MAC_HLEN;

	/* Skip over IP and TCP headers */
	pkt += 4 * (pkt[0] & 0x0f);
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
	return pkt;
}
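/*
 * Editor's note (illustrative sketch): the header walk above decodes the
 * two variable-length fields in place.  pkt[0] & 0x0f is the IPv4 IHL and,
 * once past the IP header, pkt[12] >> 4 is the TCP data offset, both in
 * 32-bit words.  For plain headers with no options:
 *
 *	4 * (pkt[0] & 0x0f)        = 4 * 5 = 20 bytes of IP header
 *	4 * ((pkt[12] >> 4) & 0xf) = 4 * 5 = 20 bytes of TCP header
 */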
/**
 * i40iw_setup_termhdr - termhdr for terminate pkt
 * @qp: sc qp ptr for pkt
 * @hdr: term header
 * @opcode: flush opcode for termhdr
 * @layer_etype: error layer + error type
 * @err: error code in the header
 */
static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
				struct i40iw_terminate_hdr *hdr,
				enum i40iw_flush_opcode opcode,
				u8 layer_etype,
				u8 err)
{
	qp->flush_code = opcode;
	hdr->layer_etype = layer_etype;
	hdr->error_code = err;
}
4047 * i40iw_bld_terminate_hdr - build terminate message header
4048 * @qp: qp associated with received terminate AE
4049 * @info: the struct contiaing AE information
4051 static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp
*qp
,
4052 struct i40iw_aeqe_info
*info
)
4054 u8
*pkt
= qp
->q2_buf
+ Q2_BAD_FRAME_OFFSET
;
4058 enum i40iw_flush_opcode flush_code
= FLUSH_INVALID
;
4060 struct i40iw_terminate_hdr
*termhdr
;
4062 termhdr
= (struct i40iw_terminate_hdr
*)qp
->q2_buf
;
4063 memset(termhdr
, 0, Q2_BAD_FRAME_OFFSET
);
	if (info->q2_data_written) {
		/* Use data from offending packet to fill in ddp & rdma hdrs */
		pkt = i40iw_locate_mpa(pkt);
		ddp_seg_len = ntohs(*(__be16 *)pkt);
		if (ddp_seg_len) {
			copy_len = 2;
			termhdr->hdrct = DDP_LEN_FLAG;
			if (pkt[2] & 0x80) {
				is_tagged = 1;
				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
					copy_len += TERM_DDP_LEN_TAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}
			} else {
				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
					copy_len += TERM_DDP_LEN_UNTAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}

				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
						copy_len += TERM_RDMA_LEN;
						termhdr->hdrct |= RDMA_HDR_FLAG;
					}
				}
			}
		}
	}

	opcode = i40iw_iwarp_opcode(info, pkt);
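
	/*
	 * Map the asynchronous event to the flush opcode plus the
	 * layer/error-type/error-code triple carried in the terminate
	 * header; some AEs refine the choice by iWARP opcode or by
	 * whether Q2 data was written.
	 */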
	switch (info->ae_id) {
	case I40IW_AE_AMP_UNALLOCATED_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BOUNDS_VIOLATION:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (info->q2_data_written)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
		break;
	case I40IW_AE_AMP_BAD_PD:
		switch (opcode) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
			break;
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
		}
		break;
	case I40IW_AE_AMP_INVALID_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BAD_QP:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_AMP_BAD_STAG_KEY:
	case I40IW_AE_AMP_BAD_STAG_INDEX:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		switch (opcode) {
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
		}
		break;
	case I40IW_AE_AMP_RIGHTS_VIOLATION:
	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case I40IW_AE_PRIV_OPERATION_DENIED:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
		break;
	case I40IW_AE_AMP_TO_WRAP:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
		break;
	case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
		break;
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
		break;
	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_DDP_NO_L_BIT:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
	case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
		break;
	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
		break;
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		if (is_tagged)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MO:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
		break;
	case I40IW_AE_DDP_UBE_INVALID_QN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
		break;
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
		break;
	default:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
		break;
	}
	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	if (flush_code && !info->in_rdrsp_wr)
		qp->sq_flush = (info->sq) ? true : false;

	return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
/**
 * i40iw_terminate_send_fin() - Send fin for terminate message
 * @qp: qp associated with received terminate AE
 */
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
{
	/* Send the fin only */
	i40iw_term_modify_qp(qp,
			     I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_FIN_ONLY,
			     0);
}
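
/*
 * Terminate send sequence (see i40iw_terminate_connection() below):
 * build the TERM payload in Q2, arm the terminate timer, mark the QP
 * I40IW_TERM_SENT, then modify the QP to TERMINATE state so the TERM
 * message goes out; i40iw_terminate_send_fin() above is used when only
 * the FIN remains to be sent.
 */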
/**
 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 termlen = 0;

	if (qp->term_flags & I40IW_TERM_SENT)
		return;         /* Sanity check */

	/* Eventtype can change from bld_terminate_hdr */
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termlen = i40iw_bld_terminate_hdr(qp, info);
	i40iw_terminate_start_timer(qp);
	qp->term_flags |= I40IW_TERM_SENT;
	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
}
/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (__be32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		i40iw_terminate_done(qp, 0);
	} else {
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}
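
/*
 * Periodic statistics support. The PE statistics registers are
 * free-running hardware counters; the code below records the
 * per-PCI-function register offsets once at init and accumulates
 * deltas in software on every read.
 */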
/**
 * i40iw_hw_stat_init - Initialize HW stats table
 * @devstat: pestat struct
 * @fcn_idx: PCI fn id
 * @hw: PF i40iw_hw structure.
 * @is_pf: Is it a PF?
 *
 * Populate the HW stat table with register offset addr for each
 * stat. And start the periodic stats timer.
 */
static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
			       u8 fcn_idx,
			       struct i40iw_hw *hw, bool is_pf)
{
	u32 stat_reg_offset;
	u32 stat_index;
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;

	devstat->hw = hw;

	if (is_pf) {
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_PFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
				I40E_GLPES_PFIP6TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_PFRDMAVINVLO(fcn_idx);
	} else {
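		/* VF statistics live in the separate GLPES VF register file */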
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_VFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
				I40E_GLPES_VFIP6TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_VFRDMAVINVLO(fcn_idx);
	}
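
	/*
	 * Snapshot the current raw counter values so that the first
	 * delta computed by the read functions starts from zero.
	 */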
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_64[stat_index];
		last_rd_stats->stat_value_64[stat_index] =
			readq(devstat->hw->hw_addr + stat_reg_offset);
	}

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_32[stat_index];
		last_rd_stats->stat_value_32[stat_index] =
			i40iw_rd32(devstat->hw, stat_reg_offset);
	}
}
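
/*
 * Rollover handling in the two readers below: the raw counters wrap,
 * so when a new raw value is smaller than the last one read, only the
 * post-wrap portion is added to the accumulated software value.
 */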
/**
 * i40iw_hw_stat_read_32 - Read 32-bit HW stat counter and accommodate roll-overs.
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 */
static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_32b index,
				  u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_32[index];

	new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_32[index])
		hw_stats->stat_value_32[index] += new_stat_value;
	else
		hw_stats->stat_value_32[index] +=
			new_stat_value - last_rd_stats->stat_value_32[index];
	last_rd_stats->stat_value_32[index] = new_stat_value;
	*value = hw_stats->stat_value_32[index];
}
/**
 * i40iw_hw_stat_read_64 - Read HW stat counter (greater than 32-bit) and accommodate roll-overs.
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 */
static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_64b index,
				  u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_64[index];

	new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_64[index])
		hw_stats->stat_value_64[index] += new_stat_value;
	else
		hw_stats->stat_value_64[index] +=
			new_stat_value - last_rd_stats->stat_value_64[index];
	last_rd_stats->stat_value_64[index] = new_stat_value;
	*value = hw_stats->stat_value_64[index];
}
/**
 * i40iw_hw_stat_read_all - read all HW stat counters
 * @devstat: pestat struct
 * @stat_values: hw stats structure
 *
 * Read all the HW stat counters and populates hw_stats structure
 * of passed-in dev's pestat as well as copy created in stat_values.
 */
static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
				   struct i40iw_dev_hw_stats *stat_values)
{
	u32 stat_index;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index,
				      &stat_values->stat_value_32[stat_index]);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index,
				      &stat_values->stat_value_64[stat_index]);
}
/**
 * i40iw_hw_stat_refresh_all - Update all HW stat structs
 * @devstat: pestat struct
 *
 * Read all the HW stat counters to refresh values in hw_stats structure
 * of passed-in dev's pestat
 */
static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
{
	u64 stat_value;
	u32 stat_index;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
}
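
/*
 * Static ops tables handed out through the i40iw_sc_dev pointers in
 * i40iw_device_init(); the positional initializers must match the
 * member order of the corresponding ops structures declared in
 * i40iw_type.h.
 */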
static struct i40iw_cqp_ops iw_cqp_ops = {
	i40iw_sc_cqp_init,
	i40iw_sc_cqp_create,
	i40iw_sc_cqp_post_sq,
	i40iw_sc_cqp_get_next_send_wqe,
	i40iw_sc_cqp_destroy,
	i40iw_sc_poll_for_cqp_op_done
};

static struct i40iw_ccq_ops iw_ccq_ops = {
	i40iw_sc_ccq_init,
	i40iw_sc_ccq_create,
	i40iw_sc_ccq_destroy,
	i40iw_sc_ccq_create_done,
	i40iw_sc_ccq_get_cqe_info,
	i40iw_sc_ccq_arm
};

static struct i40iw_ceq_ops iw_ceq_ops = {
	i40iw_sc_ceq_init,
	i40iw_sc_ceq_create,
	i40iw_sc_cceq_create_done,
	i40iw_sc_cceq_destroy_done,
	i40iw_sc_cceq_create,
	i40iw_sc_ceq_destroy,
	i40iw_sc_process_ceq
};

static struct i40iw_aeq_ops iw_aeq_ops = {
	i40iw_sc_aeq_init,
	i40iw_sc_aeq_create,
	i40iw_sc_aeq_destroy,
	i40iw_sc_get_next_aeqe,
	i40iw_sc_repost_aeq_entries,
	i40iw_sc_aeq_create_done,
	i40iw_sc_aeq_destroy_done
};

static struct i40iw_pd_ops iw_pd_ops = {
	i40iw_sc_pd_init,
};

static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};

static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	i40iw_sc_cq_init,
	i40iw_sc_cq_create,
	i40iw_sc_cq_destroy,
	i40iw_sc_cq_modify,
};

static struct i40iw_mr_ops iw_mr_ops = {
	i40iw_sc_alloc_stag,
	i40iw_sc_mr_reg_non_shared,
	i40iw_sc_mr_reg_shared,
	i40iw_sc_dealloc_stag,
	i40iw_sc_query_stag,
	i40iw_sc_mw_alloc
};

static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	i40iw_sc_manage_push_page,
	i40iw_sc_manage_hmc_pm_func_table,
	i40iw_sc_set_hmc_resource_profile,
	i40iw_sc_commit_fpm_values,
	i40iw_sc_query_fpm_values,
	i40iw_sc_static_hmc_pages_allocated,
	i40iw_sc_add_arp_cache_entry,
	i40iw_sc_del_arp_cache_entry,
	i40iw_sc_query_arp_cache_entry,
	i40iw_sc_manage_apbvt_entry,
	i40iw_sc_manage_qhash_table_entry,
	i40iw_sc_alloc_local_mac_ipaddr_entry,
	i40iw_sc_add_local_mac_ipaddr_entry,
	i40iw_sc_del_local_mac_ipaddr_entry,
	i40iw_sc_cqp_nop,
	i40iw_sc_commit_fpm_values_done,
	i40iw_sc_query_fpm_values_done,
	i40iw_sc_manage_hmc_pm_func_table_done,
	i40iw_sc_suspend_qp,
	i40iw_sc_resume_qp
};

static struct i40iw_hmc_ops iw_hmc_ops = {
	i40iw_sc_init_iw_hmc,
	i40iw_sc_parse_fpm_query_buf,
	i40iw_sc_configure_iw_fpm,
	i40iw_sc_parse_fpm_commit_buf,
	i40iw_sc_create_hmc_obj,
	i40iw_sc_del_hmc_obj,
	NULL,
	NULL
};

static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
	i40iw_hw_stat_init,
	i40iw_hw_stat_read_32,
	i40iw_hw_stat_read_64,
	i40iw_hw_stat_read_all,
	i40iw_hw_stat_refresh_all
};
/**
 * i40iw_device_init_pestat - Initialize the pestat structure
 * @devstat: pestat struct
 */
enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
{
	devstat->ops = iw_device_pestat_ops;
	return 0;
}
/**
 * i40iw_device_init - Initialize IWARP device
 * @dev: IWARP device pointer
 * @info: IWARP init info
 */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
					 struct i40iw_device_init_info *info)
{
	u32 val;
	u32 vchnl_ver = 0;
	u16 hmc_fcn = 0;
	enum i40iw_status_code ret_code = 0;
	u8 db_size;

	spin_lock_init(&dev->cqp_lock);
	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for the cqp commands backlog. */

	i40iw_device_init_uk(&dev->dev_uk);

	dev->debug_mask = info->debug_mask;

	ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_DEV,
			    "%s: i40iw_device_init_pestat failed\n", __func__);
		return ret_code;
	}
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->qs_handle = info->qs_handle;
	dev->exception_lan_queue = info->exception_lan_queue;
	dev->is_pf = info->is_pf;

	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;

	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;
	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;

	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
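
	/* PF-only setup: HW stats table, periodic stats timer and doorbell BAR check */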
	if (dev->is_pf) {
		dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
			dev->hmc_fn_id, dev->hw, true);
		spin_lock_init(&dev->dev_pestat.stats_lock);
		/* start the periodic stats_timer */
		i40iw_hw_stats_start_timer(dev);
		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
		    (db_size != I40IW_PE_DB_SIZE_8M)) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
				    __func__, val);
			ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
			return ret_code;
		}
		dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
	} else {
		dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
	}
	dev->cqp_ops = &iw_cqp_ops;
	dev->ccq_ops = &iw_ccq_ops;
	dev->ceq_ops = &iw_ceq_ops;
	dev->aeq_ops = &iw_aeq_ops;
	dev->cqp_misc_ops = &iw_cqp_misc_ops;
	dev->iw_pd_ops = &iw_pd_ops;
	dev->iw_priv_qp_ops = &iw_priv_qp_ops;
	dev->iw_priv_cq_ops = &iw_priv_cq_ops;
	dev->mr_ops = &iw_mr_ops;
	dev->hmc_ops = &iw_hmc_ops;
	dev->vchnl_if.vchnl_send = info->vchnl_send;
	if (dev->vchnl_if.vchnl_send)
		dev->vchnl_up = true;
	else
		dev->vchnl_up = false;
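
	/*
	 * A VF learns its HMC function id from the PF over the virtual
	 * channel after negotiating the channel version.
	 */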
	if (!dev->is_pf) {
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
		ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
		if (!ret_code) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: Get Channel version rc = 0x%0x, version is %u\n",
				    __func__, ret_code, vchnl_ver);
			ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
			if (!ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_DEV,
					    "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
					    __func__, ret_code, hmc_fcn);
				dev->hmc_fn_id = (u8)hmc_fcn;
			}
		}
	}
	dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;