/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"
/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
}
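/*
 * Note on i40iw_insert_wqe_hdr(): the header quad-word at offset 24 carries
 * the WQE-valid (polarity) bit, which is what hands the descriptor to the
 * CQP engine.  The wmb() orders all prior payload writes ahead of that bit
 * flip, so hardware can never observe a valid header over a half-written
 * WQE.
 */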
/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
                                          u32 *val,
                                          u32 *tail,
                                          u32 *error)
{
        if (cqp->dev->is_pf) {
                *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
                *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
        } else {
                *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
                *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
        }
}
/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
                                        struct i40iw_sc_cqp *cqp,
                                        u32 tail,
                                        u32 count)
{
        u32 i = 0;
        u32 newtail, error, val;

        while (i < count) {
                i++;
                i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
                if (error) {
                        error = (cqp->dev->is_pf) ?
                                 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
                                 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        return I40IW_ERR_CQP_COMPL_ERROR;
                }
                if (newtail != tail) {
                        /* SUCCESS */
                        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
                        return 0;
                }
                udelay(I40IW_SLEEP_COUNT);
        }
        return I40IW_ERR_TIMEOUT;
}
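/*
 * Note on i40iw_cqp_poll_registers(): completion is inferred from movement
 * of the hardware tail away from the caller's snapshot rather than from a
 * CCQ entry, which is why callers capture the tail via
 * i40iw_get_cqp_reg_info() before ringing the doorbell and then pass that
 * snapshot in here.
 */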
/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 *
 * parses fpm commit info and copies base value
 * of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
                                u64 *buf,
                                struct i40iw_hmc_obj_info *info)
{
        u64 temp;
        u32 i, j;

        /* copy base values in obj_info */
        for (i = I40IW_HMC_IW_QP, j = 0;
             i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                info[i].base = RS_64_1(temp, 32) * 512;
        }
        return 0;
}
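/*
 * Layout note (derived from the loop above): the commit buffer holds one
 * 64-bit word per HMC object, in object-id order from I40IW_HMC_IW_QP
 * through I40IW_HMC_IW_PBLE at an 8-byte stride.  The object's base lives
 * in the upper 32 bits, expressed in 512-byte units; e.g. an upper half of
 * 0x10 decodes to a base of 16 * 512 = 8192 bytes.
 */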
/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copies max_cnt and
 * size value of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                u64 *buf,
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
        u64 temp;
        struct i40iw_hmc_obj_info *obj_info;
        u32 i, j, size;
        u16 max_pe_sds;

        obj_info = hmc_info->hmc_obj;

        get_64bit_val(buf, 0, &temp);
        hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
        max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

        /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
        if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
                max_pe_sds--;
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

        for (i = I40IW_HMC_IW_QP, j = 8;
             i <= I40IW_HMC_IW_ARP; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                if (i == I40IW_HMC_IW_QP)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
                else if (i == I40IW_HMC_IW_CQ)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
                else
                        obj_info[i].max_cnt = (u32)temp;

                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = ((u64)1 << size);
        }
        for (i = I40IW_HMC_IW_MR, j = 48;
             i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                obj_info[i].max_cnt = (u32)temp;
                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = LS_64_1(1, size);
        }

        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
        get_64bit_val(buf, 64, &temp);
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;
        get_64bit_val(buf, 80, &temp);
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;
        return 0;
}
/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
                             u16 pd_id)
{
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
        pd->dev = dev;
}
/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to encoded_size
 * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
        u8 encoded_size = 0;

        /* cqp sq's hw coded value starts from 1 for size of 4
         * while it starts from 0 for qp' wq's.
         */
        if (cqpsq)
                encoded_size = 1;
        wqsize >>= 2;
        while (wqsize >>= 1)
                encoded_size++;
        return encoded_size;
}
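/*
 * Worked example for i40iw_get_encoded_wqe_size(): sizes are powers of two,
 * and the loop computes log2(wqsize) - 2, plus 1 for a CQP SQ.  So a CQP SQ
 * of 4 encodes to 1 and one of 2048 encodes to 10, while a QP WQ of 4
 * encodes to 0.
 */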
/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
                                                struct i40iw_cqp_init_info *info)
{
        u8 hw_sq_size;

        if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
            (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
            ((info->sq_size & (info->sq_size - 1))))
                return I40IW_ERR_INVALID_SIZE;

        hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
        cqp->size = sizeof(*cqp);
        cqp->sq_size = info->sq_size;
        cqp->hw_sq_size = hw_sq_size;
        cqp->sq_base = info->sq;
        cqp->host_ctx = info->host_ctx;
        cqp->sq_pa = info->sq_pa;
        cqp->host_ctx_pa = info->host_ctx_pa;
        cqp->dev = info->dev;
        cqp->struct_ver = info->struct_ver;
        cqp->scratch_array = info->scratch_array;
        cqp->polarity = 0;
        cqp->en_datacenter_tcp = info->en_datacenter_tcp;
        cqp->enabled_vf_count = info->enabled_vf_count;
        cqp->hmc_profile = info->hmc_profile;
        info->dev->cqp = cqp;

        I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
                    __func__, cqp->sq_size, cqp->hw_sq_size,
                    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
        return 0;
}
/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @disable_pfpdus: if pfpdu to be disabled
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
                                                  bool disable_pfpdus,
                                                  u16 *maj_err,
                                                  u16 *min_err)
{
        u64 temp;
        u32 cnt = 0, p1, p2, val = 0, err_code;
        enum i40iw_status_code ret_code;

        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
                                          128,
                                          I40IW_SD_BUF_ALIGNMENT);

        if (ret_code)
                goto exit;

        temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
               LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

        if (disable_pfpdus)
                temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);

        set_64bit_val(cqp->host_ctx, 0, temp);
        set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
        temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
               LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
        set_64bit_val(cqp->host_ctx, 16, temp);
        set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
        set_64bit_val(cqp->host_ctx, 32, 0);
        set_64bit_val(cqp->host_ctx, 40, 0);
        set_64bit_val(cqp->host_ctx, 48, 0);
        set_64bit_val(cqp->host_ctx, 56, 0);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
                        cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

        p1 = RS_32_1(cqp->host_ctx_pa, 32);
        p2 = (u32)cqp->host_ctx_pa;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
                        ret_code = I40IW_ERR_TIMEOUT;
                        /*
                         * read PFPE_CQPERRORCODES register to get the minor
                         * and major error code
                         */
                        if (cqp->dev->is_pf)
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
                        else
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
                        *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
                        goto exit;
                }
                udelay(I40IW_SLEEP_COUNT);
                if (cqp->dev->is_pf)
                        val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
                else
                        val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
        } while (!val);

exit:
        if (!ret_code)
                cqp->process_cqp_sds = i40iw_update_sds_noccq;

        return ret_code;
}
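/*
 * Note on i40iw_sc_cqp_create(): after CCQPHIGH/CCQPLOW are programmed the
 * function busy-waits on CCQPSTATUS becoming nonzero, giving up after
 * I40IW_DONE_COUNT iterations of udelay(I40IW_SLEEP_COUNT); on timeout the
 * major/minor error codes are harvested from the CQPERRCODES register for
 * the caller.
 */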
/**
 * i40iw_sc_cqp_post_sq - post of cqp's sq
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
        if (cqp->dev->is_pf)
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
        else
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

        i40iw_debug(cqp->dev,
                    I40IW_DEBUG_WQE,
                    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
                    __func__,
                    cqp->sq_ring.head,
                    cqp->sq_ring.tail,
                    cqp->sq_ring.size);
}
/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for cqp wqe
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
        u64 *wqe = NULL;
        u32 wqe_idx;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
                i40iw_debug(cqp->dev,
                            I40IW_DEBUG_WQE,
                            "%s: ring is full head %x tail %x size %x\n",
                            __func__,
                            cqp->sq_ring.head,
                            cqp->sq_ring.tail,
                            cqp->sq_ring.size);
                return NULL;
        }
        I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
        if (ret_code)
                return NULL;
        if (!wqe_idx)
                cqp->polarity = !cqp->polarity;

        wqe = cqp->sq_base[wqe_idx].elem;
        cqp->scratch_array[wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);

        return wqe;
}
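/*
 * Note on i40iw_sc_cqp_get_next_send_wqe(): the producer polarity flips
 * each time the head wraps to slot 0, and the caller's scratch cookie is
 * parked in scratch_array[wqe_idx] so i40iw_sc_ccq_get_cqe_info() can hand
 * it back when the matching completion surfaces.
 */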
/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
        u32 cnt = 0, val = 1;
        enum i40iw_status_code ret_code = 0;
        u32 cqpstat_addr;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
                cqpstat_addr = I40E_PFPE_CCQPSTATUS;
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
                cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        ret_code = I40IW_ERR_TIMEOUT;
                        break;
                }
                udelay(I40IW_SLEEP_COUNT);
                val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
        } while (val);

        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
        return ret_code;
}
/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_seq_num;

        /* write to cq doorbell shadow area */
        /* arm next se should always be zero */
        get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

        wmb();       /* make sure shadow area is updated before arming */

        if (ccq->dev->is_pf)
                i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
        else
                i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}
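/*
 * Note on i40iw_sc_ccq_arm(): arming is a read-modify-write of the shadow
 * area at offset 32 that preserves sw_cq_sel/arm_next_se, bumps the arm
 * sequence number, and sets ARM_NEXT; the wmb() makes the shadow update
 * visible before the CQARM doorbell tells hardware to go look at it.
 */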
/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
                                        struct i40iw_sc_cq *ccq,
                                        struct i40iw_ccq_cqe_info *info)
{
        u64 qp_ctx, temp, temp1;
        u64 *cqe;
        struct i40iw_sc_cqp *cqp;
        u32 wqe_idx;
        u8 polarity;
        enum i40iw_status_code ret_code = 0;

        if (ccq->cq_uk.avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

        get_64bit_val(cqe, 24, &temp);
        polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
        if (polarity != ccq->cq_uk.polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        get_64bit_val(cqe, 8, &qp_ctx);
        cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
        info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
        info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        if (info->error) {
                info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
                info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        }
        wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
        info->scratch = cqp->scratch_array[wqe_idx];

        get_64bit_val(cqe, 16, &temp1);
        info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
        get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
        info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
        info->cqp = cqp;

        /*  move the head for cq */
        I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
        if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
                ccq->cq_uk.polarity ^= 1;

        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
        set_64bit_val(ccq->cq_uk.shadow_area,
                      0,
                      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
        wmb(); /* write shadow area before tail */
        I40IW_RING_MOVE_TAIL(cqp->sq_ring);

        return ret_code;
}
/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
                                        struct i40iw_sc_cqp *cqp,
                                        u8 op_code,
                                        struct i40iw_ccq_cqe_info *compl_info)
{
        struct i40iw_ccq_cqe_info info;
        struct i40iw_sc_cq *ccq;
        enum i40iw_status_code ret_code = 0;
        u32 cnt = 0;

        memset(&info, 0, sizeof(info));
        ccq = cqp->dev->ccq;
        while (1) {
                if (cnt++ > I40IW_DONE_COUNT)
                        return I40IW_ERR_TIMEOUT;

                if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
                        udelay(I40IW_SLEEP_COUNT);
                        continue;
                }

                if (info.error) {
                        ret_code = I40IW_ERR_CQP_COMPL_ERROR;
                        break;
                }
                /* check if opcode is the one we are waiting for */
                if (op_code != info.op_code) {
                        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                                    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
                                    __func__, op_code, info.op_code);
                }
                /* success, exit out of the loop */
                if (op_code == info.op_code)
                        break;
        }

        if (compl_info)
                memcpy(compl_info, &info, sizeof(*compl_info));

        return ret_code;
}
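/*
 * Note on i40iw_sc_poll_for_cqp_op_done(): the loop keeps consuming CCQ
 * entries until one carries the requested opcode; mismatches are only
 * logged, since completions for other outstanding CQP ops may legitimately
 * surface first.
 */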
/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_cqp_manage_push_page_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
                return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->qs_handle);

        header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_manage_hmc_pm_func_table - manage function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: function number
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 vf_index,
                                bool free_pm_fcn,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (vf_index >= I40IW_MAX_VF_PER_PF)
                return I40IW_ERR_INVALID_VF_ID;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_profile_type,
                                u8 vf_num, bool post_sq,
                                bool poll_registers)
{
        u64 *wqe;
        u64 header;
        u32 val, tail, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16,
                      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
                       LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

        header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (poll_registers)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
                else
                        ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
                                                                 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
                                                                 NULL);
        }

        return ret_code;
}
/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}
/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}
/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_fn_id,
                                struct i40iw_dma_mem *commit_fpm_mem,
                                bool post_sq,
                                u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, commit_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);

                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_commit_fpm_values_done(cqp);
        }

        return ret_code;
}
/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}
/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_fn_id,
                                struct i40iw_dma_mem *query_fpm_mem,
                                bool post_sq,
                                u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, query_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        /* read the tail from CQP_TAIL register */
        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_query_fpm_values_done(cqp);
        }

        return ret_code;
}
/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_add_arp_cache_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 8, info->reach_max);

        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 16, temp);

        header = info->arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
                 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
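/*
 * MAC packing example (the qhash and local-mac WQEs below use the same
 * scheme): mac_addr[0] lands in bits 47:40 down to mac_addr[5] in bits 7:0,
 * so 00:11:22:33:44:55 packs to 0x001122334455.
 */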
/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u16 arp_index,
                                        bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u16 arp_index,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_apbvt_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->port);

        header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
                 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * a listener is created, it is called with an entry type of I40IW_QHASH_TYPE_TCP_SYN with the
 * local ip address and tcp port. When a SYN is received (passive connections) or
 * sent (active connections), this routine is called with an entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
 *
 * When the iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_qhash_table_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 qw1 = 0;
        u64 qw2 = 0;
        u64 temp;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 0, temp);

        qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
              LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
        if (info->ipv4_valid) {
                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
        } else {
                set_64bit_val(wqe,
                              56,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                              LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                              LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
        }
        qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
        if (info->vlan_valid)
                qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
        set_64bit_val(wqe, 16, qw2);
        if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
                qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
                if (!info->ipv4_valid) {
                        set_64bit_val(wqe,
                                      40,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                                      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                                      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
                } else {
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
                }
        }

        set_64bit_val(wqe, 8, qw1);
        temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
               LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
               LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
               LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
               LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
               LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

        i40iw_insert_wqe_hdr(wqe, temp);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_local_mac_ipaddr_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 32, temp);

        header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac addr delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 entry_idx,
                                u8 ignore_ref_count,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_cqp_nop - send a nop wqe
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
                                               u64 scratch,
                                               bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
                                                struct i40iw_ceq_init_info *info)
{
        u32 pble_obj_cnt;

        if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
            (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
                return I40IW_ERR_INVALID_SIZE;

        if (info->ceq_id >= I40IW_MAX_CEQID)
                return I40IW_ERR_INVALID_CEQ_ID;

        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        ceq->size = sizeof(*ceq);
        ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
        ceq->ceq_id = info->ceq_id;
        ceq->dev = info->dev;
        ceq->elem_cnt = info->elem_cnt;
        ceq->ceq_elem_pa = info->ceqe_pa;
        ceq->virtual_map = info->virtual_map;

        ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
        ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
        ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

        ceq->tph_en = info->tph_en;
        ceq->tph_val = info->tph_val;
        ceq->polarity = 1;
        I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
        ceq->dev->ceq[info->ceq_id] = ceq;

        return 0;
}
/**
 * i40iw_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
                                                  u64 scratch,
                                                  bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;

        cqp = ceq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, ceq->elem_cnt);
        set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
        set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
        set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

        header = ceq->ceq_id |
                 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = ceq->dev->cqp;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
}
/**
 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
 * @ceq: ceq sc structure
 */
static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = ceq->dev->cqp;
        cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
}
/**
 * i40iw_sc_cceq_create - create cceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
{
        enum i40iw_status_code ret_code;

        ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
        if (!ret_code)
                ret_code = i40iw_sc_cceq_create_done(ceq);
        return ret_code;
}
/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
                                                   u64 scratch,
                                                   bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;

        cqp = ceq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, ceq->elem_cnt);
        set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
        header = ceq->ceq_id |
                 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
        u64 temp;
        u64 *ceqe;
        struct i40iw_sc_cq *cq = NULL;
        u8 polarity;

        ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
        get_64bit_val(ceqe, 0, &temp);
        polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
        if (polarity != ceq->polarity)
                return cq;

        cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

        I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
        if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
                ceq->polarity ^= 1;

        if (dev->is_pf)
                i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
        else
                i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

        return cq;
}
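/*
 * Note on i40iw_sc_process_ceq(): the CEQE stores the completed CQ's
 * context pointer shifted right by one bit, so LS_64_1(temp, 1) shifts it
 * back (dropping the valid bit off the top) before the pointer cast, and
 * the CQACK write acknowledges that CQ back to hardware.
 */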
/**
 * i40iw_sc_aeq_init - initialize aeq
 * @aeq: aeq structure ptr
 * @info: aeq initialization info
 */
static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
                                                struct i40iw_aeq_init_info *info)
{
        u32 pble_obj_cnt;

        if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
            (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
                return I40IW_ERR_INVALID_SIZE;
        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        aeq->size = sizeof(*aeq);
        aeq->polarity = 1;
        aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
        aeq->dev = info->dev;
        aeq->elem_cnt = info->elem_cnt;

        aeq->aeq_elem_pa = info->aeq_elem_pa;
        I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
        info->dev->aeq = aeq;

        aeq->virtual_map = info->virtual_map;
        aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
        aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
        aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
        info->dev->aeq = aeq;
        return 0;
}
/**
 * i40iw_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
                                                  u64 scratch,
                                                  bool post_sq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;

        cqp = aeq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, aeq->elem_cnt);
        set_64bit_val(wqe, 32,
                      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
        set_64bit_val(wqe, 48,
                      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

        header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
                 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
                                                   u64 scratch,
                                                   bool post_sq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;

        cqp = aeq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 16, aeq->elem_cnt);
        set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
        header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
                 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
                                                     struct i40iw_aeqe_info *info)
{
        u64 temp, compl_ctx;
        u64 *aeqe;
        u16 wqe_idx;
        u8 ae_src;
        u8 polarity;

        aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
        get_64bit_val(aeqe, 0, &compl_ctx);
        get_64bit_val(aeqe, 8, &temp);
        polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

        if (aeq->polarity != polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

        ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
        wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
        info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
        info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
        info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
        info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
        info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
        info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
        switch (ae_src) {
        case I40IW_AE_SOURCE_RQ:
        case I40IW_AE_SOURCE_RQ_0011:
                info->qp = true;
                info->wqe_idx = wqe_idx;
                info->compl_ctx = compl_ctx;
                break;
        case I40IW_AE_SOURCE_CQ:
        case I40IW_AE_SOURCE_CQ_0110:
        case I40IW_AE_SOURCE_CQ_1010:
        case I40IW_AE_SOURCE_CQ_1110:
                info->cq = true;
                info->compl_ctx = LS_64_1(compl_ctx, 1);
                break;
        case I40IW_AE_SOURCE_SQ:
        case I40IW_AE_SOURCE_SQ_0111:
                info->qp = true;
                info->sq = true;
                info->wqe_idx = wqe_idx;
                info->compl_ctx = compl_ctx;
                break;
        case I40IW_AE_SOURCE_IN_RR_WR:
        case I40IW_AE_SOURCE_IN_RR_WR_1011:
                info->qp = true;
                info->compl_ctx = compl_ctx;
                info->in_rdrsp_wr = true;
                break;
        case I40IW_AE_SOURCE_OUT_RR:
        case I40IW_AE_SOURCE_OUT_RR_1111:
                info->qp = true;
                info->compl_ctx = compl_ctx;
                info->out_rdrsp = true;
                break;
        default:
                break;
        }
        I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
        if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
                aeq->polarity ^= 1;
        return 0;
}
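/*
 * Note on i40iw_sc_get_next_aeqe(): for CQ-sourced events the completion
 * context is reported shifted right by one, hence the LS_64_1(compl_ctx, 1)
 * in the CQ cases above, matching how i40iw_sc_process_ceq() recovers a CQ
 * pointer from a CEQE.
 */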
/**
 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
 * @dev: sc device struct
 * @count: allocate count
 */
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
                                                          u32 count)
{
        if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
                return I40IW_ERR_INVALID_SIZE;

        if (dev->is_pf)
                i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
        else
                i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);

        return 0;
}
/**
 * i40iw_sc_aeq_create_done - poll for aeq create cqp op to complete
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = aeq->dev->cqp;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
}
/**
 * i40iw_sc_aeq_destroy_done - destroy of aeq during close
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = aeq->dev->cqp;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
}
/**
 * i40iw_sc_ccq_init - initialize control cq
 * @cq: sc's cq struct
 * @info: info for control cq initialization
 */
static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
                                                struct i40iw_ccq_init_info *info)
{
        u32 pble_obj_cnt;

        if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
                return I40IW_ERR_INVALID_SIZE;

        if (info->ceq_id > I40IW_MAX_CEQID)
                return I40IW_ERR_INVALID_CEQ_ID;

        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        cq->cq_pa = info->cq_pa;
        cq->cq_uk.cq_base = info->cq_base;
        cq->shadow_area_pa = info->shadow_area_pa;
        cq->cq_uk.shadow_area = info->shadow_area;
        cq->shadow_read_threshold = info->shadow_read_threshold;
        cq->dev = info->dev;
        cq->ceq_id = info->ceq_id;
        cq->cq_uk.cq_size = info->num_elem;
        cq->cq_type = I40IW_CQ_TYPE_CQP;
        cq->ceqe_mask = info->ceqe_mask;
        I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);

        cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
        cq->ceq_id_valid = info->ceq_id_valid;
        cq->tph_en = info->tph_en;
        cq->tph_val = info->tph_val;
        cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;

        cq->pbl_list = info->pbl_list;
        cq->virtual_map = info->virtual_map;
        cq->pbl_chunk_size = info->pbl_chunk_size;
        cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
        cq->cq_uk.polarity = true;

        /* following are only for iw cqs so initialize them to zero */
        cq->cq_uk.cqe_alloc_reg = NULL;
        info->dev->ccq = cq;
        return 0;
}
/**
 * i40iw_sc_ccq_create_done - poll cqp for ccq create
 * @ccq: ccq sc struct
 */
static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
{
        struct i40iw_sc_cqp *cqp;

        cqp = ccq->dev->cqp;
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
}
/**
 * i40iw_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overflow flag for ccq
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
                                                  u64 scratch,
                                                  bool check_overflow,
                                                  bool post_sq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;
        enum i40iw_status_code ret_code;

        cqp = ccq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
        set_64bit_val(wqe, 16,
                      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
        set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
        set_64bit_val(wqe, 40, ccq->shadow_area_pa);
        set_64bit_val(wqe, 48,
                      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
        set_64bit_val(wqe, 56,
                      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));

        header = ccq->cq_uk.cq_id |
                 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
                 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
                 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
                 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
                 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
                 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
                 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                ret_code = i40iw_sc_ccq_create_done(ccq);
                if (ret_code)
                        return ret_code;
        }
        cqp->process_cqp_sds = i40iw_cqp_sds_cmd;

        return 0;
}
/**
 * i40iw_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
                                                   u64 scratch,
                                                   bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;
        enum i40iw_status_code ret_code = 0;
        u32 tail, val, error;

        cqp = ccq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
        set_64bit_val(wqe, 40, ccq->shadow_area_pa);

        header = ccq->cq_uk.cq_id |
                 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
                 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
                 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
                 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
        }
        return ret_code;
}
/**
 * i40iw_sc_cq_init - initialize completion q
 * @cq: cq struct
 * @info: cq initialization info
 */
static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
                                               struct i40iw_cq_init_info *info)
{
        u32 __iomem *cqe_alloc_reg = NULL;
        enum i40iw_status_code ret_code;
        u32 pble_obj_cnt;
        u32 arm_offset;

        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        cq->cq_pa = info->cq_base_pa;
        cq->dev = info->dev;
        cq->ceq_id = info->ceq_id;
        arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
        if (i40iw_get_hw_addr(cq->dev))
                cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
                                                arm_offset);
        info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
        ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
        if (ret_code)
                return ret_code;
        cq->virtual_map = info->virtual_map;
        cq->pbl_chunk_size = info->pbl_chunk_size;
        cq->ceqe_mask = info->ceqe_mask;
        cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;

        cq->shadow_area_pa = info->shadow_area_pa;
        cq->shadow_read_threshold = info->shadow_read_threshold;

        cq->ceq_id_valid = info->ceq_id_valid;
        cq->tph_en = info->tph_en;
        cq->tph_val = info->tph_val;

        cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

        return 0;
}
/**
 * i40iw_sc_cq_create - create completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
                                                 u64 scratch,
                                                 bool check_overflow,
                                                 bool post_sq)
{
        u64 *wqe;
        struct i40iw_sc_cqp *cqp;
        u64 header;

        if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
                return I40IW_ERR_INVALID_CQ_ID;

        if (cq->ceq_id > I40IW_MAX_CEQID)
                return I40IW_ERR_INVALID_CEQ_ID;

        cqp = cq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
        set_64bit_val(wqe, 16,
                      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));

        set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));

        set_64bit_val(wqe, 40, cq->shadow_area_pa);
        set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
        set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

        header = cq->cq_uk.cq_id |
                 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
                 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
                 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
                 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
                 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
                 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
                 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_cq_destroy - destroy completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
                                                  u64 scratch,
                                                  bool post_sq)
{
        struct i40iw_sc_cqp *cqp;
        u64 *wqe;
        u64 header;

        cqp = cq->dev->cqp;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
        set_64bit_val(wqe, 40, cq->shadow_area_pa);
        set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));

        header = cq->cq_uk.cq_id |
                 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
                 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
                 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
                 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
                 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
                 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
                 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
                 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
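/*
 * Usage sketch (illustrative only, kept out of the build): how a caller
 * might fill struct i40iw_modify_cq_info for a virtually-mapped CQ resize.
 * Field names come from the function above; the concrete values here are
 * hypothetical.
 */
#if 0
static void example_fill_cq_resize_info(struct i40iw_modify_cq_info *info,
					u32 new_size, u32 pbl_idx)
{
	memset(info, 0, sizeof(*info));
	info->cq_resize = true;			/* take size/map fields from info */
	info->cq_size = new_size;
	info->virtual_map = true;		/* CQ buffer described by a PBL */
	info->first_pm_pbl_idx = pbl_idx;
	info->pbl_chunk_size = 1;		/* leaf PBL; value is illustrative */
	info->check_overflow_change = false;	/* keep current overflow check */
	info->shadow_read_threshold = 1;	/* illustrative */
}
#endif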
/**
 * i40iw_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 */
static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
					       struct i40iw_qp_init_info *info)
{
	u32 __iomem *wqe_alloc_reg = NULL;
	enum i40iw_status_code ret_code;
	u32 pble_obj_cnt;
	u8 wqe_size;
	u32 offset;

	qp->dev = info->pd->dev;
	qp->sq_pa = info->sq_pa;
	qp->rq_pa = info->rq_pa;
	qp->hw_host_ctx_pa = info->host_ctx_pa;
	qp->q2_pa = info->q2_pa;
	qp->shadow_area_pa = info->shadow_area_pa;

	qp->q2_buf = info->q2;
	qp->pd = info->pd;
	qp->hw_host_ctx = info->host_ctx;
	offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
	if (i40iw_get_hw_addr(qp->pd->dev))
		wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
						offset);

	info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
	ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
	if (ret_code)
		return ret_code;
	qp->virtual_map = info->virtual_map;

	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
	    (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	qp->llp_stream_handle = (void *)(-1);
	qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;

	qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
						    false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
		    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
	ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
					       &wqe_size);
	if (ret_code)
		return ret_code;
	qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
				(wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
	i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
		    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
		    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
	qp->sq_tph_val = info->sq_tph_val;
	qp->rq_tph_val = info->rq_tph_val;
	qp->sq_tph_en = info->sq_tph_en;
	qp->rq_tph_en = info->rq_tph_en;
	qp->rcv_tph_en = info->rcv_tph_en;
	qp->xmit_tph_en = info->xmit_tph_en;
	qp->qs_handle = qp->pd->dev->qs_handle;
	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;

	return 0;
}
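/*
 * Aside (illustrative only, kept out of the build): LS_64/RS_64, used all
 * through this file, are bit-field pack/unpack helpers.  Their real
 * definitions live in the driver headers; the shape below is an assumption
 * for illustration, showing the usual <field>_SHIFT/<field>_MASK convention
 * such macros follow.
 */
#if 0
#define EXAMPLE_FIELD_SHIFT	32
#define EXAMPLE_FIELD_MASK	(0xffULL << EXAMPLE_FIELD_SHIFT)

/* pack: shift a value into its field position and mask off any overflow */
#define EXAMPLE_LS_64(val, field) \
	(((u64)(val) << field##_SHIFT) & field##_MASK)
/* unpack: mask the field out of a register word and shift it back down */
#define EXAMPLE_RS_64(val, field) \
	(((u64)(val) & field##_MASK) >> field##_SHIFT)
#endif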
/**
 * i40iw_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_create(
				struct i40iw_sc_qp *qp,
				struct i40iw_create_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
	    (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
		return I40IW_ERR_INVALID_QP_ID;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);

	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_modify(
				struct i40iw_sc_qp *qp,
				struct i40iw_modify_qp_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u8 term_actions = 0;
	u8 term_len = 0;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
		if (info->dont_send_fin)
			term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
		if (info->dont_send_term)
			term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
		if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
		    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
			term_len = info->termlen;
	}

	set_64bit_val(wqe, 8,
		      LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
		 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
		 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
		 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
		 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
		 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
		 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_destroy(
					struct i40iw_sc_qp *qp,
					u64 scratch,
					bool remove_hash_idx,
					bool ignore_mw_bnd,
					bool post_sq)
{
	u64 *wqe;
	u64 header;
	struct i40iw_sc_cqp *cqp;

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
		 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
		 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_flush_wqes - flush qp's wqe
 * @qp: sc qp
 * @info: flush information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
				struct i40iw_sc_qp *qp,
				struct i40iw_qp_flush_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 temp = 0;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	bool flush_sq = false, flush_rq = false;

	if (info->rq && !qp->flush_rq)
		flush_rq = true;

	if (info->sq && !qp->flush_sq)
		flush_sq = true;

	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;
	if (!flush_sq && !flush_rq) {
		if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
			return 0;
	}

	cqp = qp->pd->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	if (info->userflushcode) {
		if (flush_rq) {
			temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
				LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
		}
		if (flush_sq) {
			temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
				LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
		}
	}
	set_64bit_val(wqe, 16, temp);

	temp = (info->generate_ae) ?
		info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;

	set_64bit_val(wqe, 8, temp);

	header = qp->qp_uk.qp_id |
		 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
		 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
		 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
		 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
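/*
 * Usage sketch (illustrative only, kept out of the build): one plausible
 * fill of struct i40iw_qp_flush_info asking for a user-coded flush of both
 * queues.  Field names come from the function above; values are
 * hypothetical.
 */
#if 0
static void example_fill_flush_info(struct i40iw_qp_flush_info *info)
{
	memset(info, 0, sizeof(*info));
	info->sq = true;		/* flush the send queue */
	info->rq = true;		/* flush the receive queue */
	info->userflushcode = true;	/* use the caller-supplied codes below */
	info->sq_minor_code = 0;	/* illustrative codes */
	info->sq_major_code = 0;
	info->rq_minor_code = 0;
	info->rq_major_code = 0;
	info->generate_ae = false;	/* no asynchronous event wanted */
}
#endif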
/**
 * i40iw_sc_qp_upload_context - upload qp's context
 * @dev: sc device struct
 * @info: upload context info ptr for return
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_qp_upload_context(
					struct i40iw_sc_dev *dev,
					struct i40iw_upload_context_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, info->buf_pa);

	header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
		 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
		 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
		 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
static enum i40iw_status_code i40iw_sc_qp_setctx(
				struct i40iw_sc_qp *qp,
				u64 *qp_ctx,
				struct i40iw_qp_host_ctx_info *info)
{
	struct i40iwarp_offload_info *iw;
	struct i40iw_tcp_offload_info *tcp;
	u64 qw0, qw3, qw7 = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
	      LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
	      LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
	      LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
	      LS_64(info->push_idx, I40IWQPC_PPIDX) |
	      LS_64(info->push_mode_en, I40IWQPC_PMENA);

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
	      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
	      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);

	set_64bit_val(qp_ctx,
		      128,
		      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));

	set_64bit_val(qp_ctx,
		      136,
		      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
		      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx,
		      168,
		      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
	set_64bit_val(qp_ctx,
		      176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
		      LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));

	if (info->iwarp_info_valid) {
		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);

		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
		set_64bit_val(qp_ctx, 144, qp->q2_pa);
		set_64bit_val(qp_ctx,
			      152,
			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));

		/*
		 * Hard-code IRD_SIZE to the hw limit, 128, in qpctx, i.e.
		 * matching an advertisable IRD of 64
		 */
		iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
		set_64bit_val(qp_ctx,
			      160,
			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
			      LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
			      LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
			      LS_64(iw->rd_enable, I40IWQPC_RDOK) |
			      LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
			      LS_64(1, I40IWQPC_IWARPMODE) |
			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
			      LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
			      LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
			      LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
	}
	if (info->tcp_info_valid) {
		qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
		       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
		       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
		       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
		       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
		       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
		       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);

		qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
		       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
		       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
		       LS_64(tcp->tos, I40IWQPC_TOS) |
		       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
		       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);

		qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		set_64bit_val(qp_ctx,
			      32,
			      LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
			      LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));

		set_64bit_val(qp_ctx,
			      40,
			      LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
			      LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));

		set_64bit_val(qp_ctx,
			      48,
			      LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
			      LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
			      LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));

		qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
		       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
		       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
		       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
		       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
		       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
		       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);

		set_64bit_val(qp_ctx,
			      72,
			      LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
			      LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
		set_64bit_val(qp_ctx,
			      80,
			      LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
			      LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));

		set_64bit_val(qp_ctx,
			      88,
			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
			      LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
		set_64bit_val(qp_ctx,
			      96,
			      LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
			      LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
		set_64bit_val(qp_ctx,
			      104,
			      LS_64(tcp->srtt, I40IWQPC_SRTT) |
			      LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
		set_64bit_val(qp_ctx,
			      112,
			      LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
			      LS_64(tcp->cwnd, I40IWQPC_CWND));
		set_64bit_val(qp_ctx,
			      120,
			      LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
			      LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
		set_64bit_val(qp_ctx,
			      128,
			      LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
			      LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
		set_64bit_val(qp_ctx,
			      184,
			      LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
			      LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
		set_64bit_val(qp_ctx,
			      192,
			      LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
			      LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
	}

	set_64bit_val(qp_ctx, 0, qw0);
	set_64bit_val(qp_ctx, 24, qw3);
	set_64bit_val(qp_ctx, 56, qw7);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
			qp_ctx, I40IW_QP_CTX_SIZE);
	return 0;
}
/**
 * i40iw_sc_alloc_stag - mr stag alloc
 * @dev: sc device struct
 * @info: stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_stag(
				struct i40iw_sc_dev *dev,
				struct i40iw_allocate_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	set_64bit_val(wqe,
		      40,
		      LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
				struct i40iw_sc_dev *dev,
				struct i40iw_reg_ns_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	set_64bit_val(wqe,
		      8,
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	if (!info->chunk_size) {
		set_64bit_val(wqe, 32, info->reg_addr_pa);
		set_64bit_val(wqe, 48, 0);
	} else {
		set_64bit_val(wqe, 32, 0);
		set_64bit_val(wqe, 48, info->first_pm_pbl_index);
	}
	set_64bit_val(wqe, 40, info->hmc_fcn_index);
	set_64bit_val(wqe, 56, 0);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
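/*
 * Aside (illustrative only, kept out of the build): WQE word 0 above carries
 * either the full virtual address (VA-based addressing) or the first-byte
 * offset (zero-based addressing).  A hedged sketch of that choice, isolated
 * into a hypothetical helper:
 */
#if 0
static u64 example_mr_wqe_word0(struct i40iw_reg_ns_stag_info *info)
{
	if (info->addr_type == I40IW_ADDR_TYPE_VA_BASED)
		return (uintptr_t)info->va;	/* remote peers target the VA as-is */
	return info->fbo;	/* offset of the first byte within the first page */
}
#endif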
/**
 * i40iw_sc_mr_reg_shared - registered shared memory region
 * @dev: sc device struct
 * @info: info for shared memory registration
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_shared(
					struct i40iw_sc_dev *dev,
					struct i40iw_register_shared_stag *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 temp, va64, fbo, header;
	u32 va32;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	va64 = (uintptr_t)(info->va);
	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
	fbo = (u64)(va32 & (4096 - 1));

	set_64bit_val(wqe,
		      0,
		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
	set_64bit_val(wqe, 16, temp);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
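/*
 * Worked example of the fbo (first-byte offset) arithmetic above, with a
 * hypothetical address: for va = 0x7f3a12345678, va32 = 0x12345678 and
 * fbo = va32 & (4096 - 1) = 0x678, i.e. the offset of the buffer start
 * within its 4KB page.
 */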
/**
 * i40iw_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_dealloc_stag(
					struct i40iw_sc_dev *dev,
					struct i40iw_dealloc_stag_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_query_stag - query hardware for stag
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @stag_index: stag index for query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
						  u64 scratch,
						  u32 stag_index,
						  bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));

	header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @mw_stag_index: stag index
 * @pd_id: pd id for this mw
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mw_alloc(
					struct i40iw_sc_dev *dev,
					u64 scratch,
					u32 mw_stag_index,
					u16 pd_id,
					bool post_sq)
{
	u64 header;
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 */
static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
			       void *lsmm_buf,
			       u32 size,
			       i40iw_stag stag)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);

	set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));

	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
/**
 * i40iw_sc_send_lsmm_nostag - for privilege qp
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 */
static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
				      void *lsmm_buf,
				      u32 size)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);

	set_64bit_val(wqe, 8, size);

	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
/**
 * i40iw_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: Do read0 or write0
 */
static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);
	if (read) {
		header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
			 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
		set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
	} else {
		header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}
/**
 * i40iw_sc_post_wqe0 - send wqe with opcode
 * @qp: sc qp struct
 * @opcode: opcode to use for wqe0
 */
static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	switch (opcode) {
	case I40IWQP_OP_NOP:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	case I40IWQP_OP_RDMA_SEND:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
			 LS_64(1, I40IWQPSQ_STREAMMODE) |
			 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	default:
		i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
			    __func__);
		break;
	}
	return 0;
}
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		if (!vf_dev->fpm_query_buf) {
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/**
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	/* fill size of objects which are fixed */
	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

	return ret_code;
}
/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev : ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
		    (u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
		data = sdbuf->pa;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fall through */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fall through */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}
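/*
 * Note on the entry split above, with hypothetical numbers: for
 * info->cnt = 5, three entries ride in the WQE itself (the intentionally
 * falling-through switch writes entries [2], [1] and [0]) while
 * mem_entries = 2 are copied to the side buffer at 16 bytes apiece, which
 * is what the "mem_entries << 4" expresses.
 */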
/**
 * i40iw_update_pe_sds - cqp wqe for sd
 * @dev: ptr to i40iw_dev struct
 * @info: sd info for sd's
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
						  struct i40iw_update_sds_info *info,
						  u64 scratch)
{
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
	if (!ret_code)
		i40iw_sc_cqp_post_sq(cqp);

	return ret_code;
}
/**
 * i40iw_update_sds_noccq - update sd before ccq created
 * @dev: sc device struct
 * @info: sd info for sd's
 */
enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
					      struct i40iw_update_sds_info *info)
{
	u32 error, val, tail;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
	if (ret_code)
		return ret_code;
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	i40iw_sc_cqp_post_sq(cqp);
	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
	return ret_code;
}
/**
 * i40iw_sc_suspend_qp - suspend qp for param change
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
					   struct i40iw_sc_qp *qp,
					   u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
		 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_resume_qp - resume qp after suspend
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
					  struct i40iw_sc_qp *qp,
					  u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));

	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
		 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
/**
 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					bool post_sq,
					bool poll_registers)
{
	u64 header;
	u64 *wqe;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));

	header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error) {
		ret_code = I40IW_ERR_CQP_COMPL_ERROR;
		return ret_code;
	}
	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (poll_registers)
			/* check for cqp sq tail update */
			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
		else
			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
								 NULL);
	}

	return ret_code;
}
/**
 * i40iw_ring_full - check if cqp ring is full
 * @cqp: struct for cqp hw
 */
static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
{
	return I40IW_RING_FULL_ERR(cqp->sq_ring);
}
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u32 powerof2;
	u64 sd_needed, bytes_needed;
	u32 loop_count = 0;

	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	bytes_needed = 0;
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
		bytes_needed +=
		    (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
			    __func__, i, hmc_info->hmc_obj[i].max_cnt,
			    hmc_info->hmc_obj[i].size);
	}
	sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
		    __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
					qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;

		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		bytes_needed = 0;
		for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
			bytes_needed +=
			    (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
		sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		    (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER) {
				qpwanted -= FPM_MULTIPLIER;
				powerof2 = 1;
				while (powerof2 < qpwanted)
					powerof2 *= 2;
				powerof2 /= 2;
				qpwanted = powerof2;
			} else {
				qpwanted /= 2;
			}
		}
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	bytes_needed = 0;
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
		bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
			    __func__, i, hmc_info->hmc_obj[i].cnt,
			    hmc_info->hmc_obj[i].size);
	}
	sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;    /* round up not truncate. */
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	hmc_info->sd_table.sd_cnt = (u32)sd_needed;

	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}
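/*
 * Worked instance of the sd_needed arithmetic above, assuming
 * I40IW_HMC_DIRECT_BP_SIZE is 2MB (one segment descriptor covers one 2MB
 * backing page): bytes_needed = 5MB gives sd_needed = 5MB / 2MB + 1 = 3
 * descriptors, matching the true ceiling of 2.5; for exact multiples the
 * "+ 1" deliberately over-allocates one descriptor rather than truncating.
 */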
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);

		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
				pcmdinfo->post_sq);

		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);

		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.scratch);

		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}
/**
 * i40iw_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
					     struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}
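/*
 * Usage sketch (illustrative only, kept out of the build): the deferral
 * path above executes a command immediately when the ring has room,
 * otherwise queues it on cqp_cmd_head for i40iw_process_bh to drain.  The
 * info fill below is hypothetical; field names follow the uses above.
 */
#if 0
static void example_queue_suspend(struct i40iw_sc_dev *dev,
				  struct i40iw_sc_qp *qp)
{
	struct cqp_commands_info cmdinfo;

	memset(&cmdinfo, 0, sizeof(cmdinfo));
	cmdinfo.cqp_cmd = OP_SUSPEND;		/* matches the case above */
	cmdinfo.post_sq = 1;			/* ring the doorbell when posted */
	cmdinfo.in.u.suspend_resume.cqp = dev->cqp;
	cmdinfo.in.u.suspend_resume.qp = qp;
	cmdinfo.in.u.suspend_resume.scratch = 0;	/* illustrative */
	(void)i40iw_process_cqp_cmd(dev, &cmdinfo);
}
#endif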
/**
 * i40iw_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
{
	enum i40iw_status_code status = 0;
	struct cqp_commands_info *pcmdinfo;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);

		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
		if (status)
			break;
	}
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}
/**
 * i40iw_iwarp_opcode - determine if incoming is rdma layer
 * @info: aeq info for the packet
 * @pkt: packet for error
 */
static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
{
	u16 *mpa;
	u32 opcode = 0xffffffff;

	if (info->q2_data_written) {
		mpa = (u16 *)pkt;
		opcode = ntohs(mpa[1]) & 0xf;
	}
	return opcode;
}
/**
 * i40iw_locate_mpa - return pointer to mpa in the pkt
 * @pkt: packet with data
 */
static u8 *i40iw_locate_mpa(u8 *pkt)
{
	/* skip over ethernet header */
	pkt += I40IW_MAC_HLEN;

	/* Skip over IP and TCP headers */
	pkt += 4 * (pkt[0] & 0x0f);
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
	return pkt;
}
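/*
 * Worked instance of the header arithmetic above, with a hypothetical
 * packet: a 20-byte IPv4 header has IHL = 5 (pkt[0] & 0x0f), so the pointer
 * advances 4 * 5 = 20 bytes; a TCP header with data offset 8 in the high
 * nibble of byte 12 (i.e. 12 bytes of options) advances it a further
 * 4 * 8 = 32 bytes, landing on the MPA framing.
 */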
/**
 * i40iw_setup_termhdr - termhdr for terminate pkt
 * @qp: sc qp ptr for pkt
 * @hdr: term hdr
 * @opcode: flush opcode for termhdr
 * @layer_etype: error layer + error type
 * @err: error code in the header
 */
static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
				struct i40iw_terminate_hdr *hdr,
				enum i40iw_flush_opcode opcode,
				u8 layer_etype,
				u8 err)
{
	qp->flush_code = opcode;
	hdr->layer_etype = layer_etype;
	hdr->error_code = err;
}

/**
 * i40iw_bld_terminate_hdr - build terminate message header
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
				   struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	u16 ddp_seg_len;
	int copy_len = 0;
	u8 is_tagged = 0;
	enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
	u32 opcode;
	struct i40iw_terminate_hdr *termhdr;

	termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);

	if (info->q2_data_written) {
		/* Use data from offending packet to fill in ddp & rdma hdrs */
		pkt = i40iw_locate_mpa(pkt);
		ddp_seg_len = ntohs(*(u16 *)pkt);
		if (ddp_seg_len) {
			copy_len = 2;
			termhdr->hdrct = DDP_LEN_FLAG;
			if (pkt[2] & 0x80) {
				is_tagged = 1;
				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
					copy_len += TERM_DDP_LEN_TAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}
			} else {
				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
					copy_len += TERM_DDP_LEN_UNTAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}

				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
						copy_len += TERM_RDMA_LEN;
						termhdr->hdrct |= RDMA_HDR_FLAG;
					}
				}
			}
		}
	}

	opcode = i40iw_iwarp_opcode(info, pkt);

	switch (info->ae_id) {
	case I40IW_AE_AMP_UNALLOCATED_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BOUNDS_VIOLATION:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (info->q2_data_written)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
		break;
	case I40IW_AE_AMP_BAD_PD:
		switch (opcode) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
			break;
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
		}
		break;
	case I40IW_AE_AMP_INVALID_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BAD_QP:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_AMP_BAD_STAG_KEY:
	case I40IW_AE_AMP_BAD_STAG_INDEX:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		switch (opcode) {
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
		}
		break;
	case I40IW_AE_AMP_RIGHTS_VIOLATION:
	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case I40IW_AE_PRIV_OPERATION_DENIED:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
		break;
	case I40IW_AE_AMP_TO_WRAP:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
		break;
	case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
		break;
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
		break;
	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_DDP_NO_L_BIT:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
	case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
		break;
	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
		break;
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		if (is_tagged)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MO:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
		break;
	case I40IW_AE_DDP_UBE_INVALID_QN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
		break;
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
		break;
	default:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
		break;
	}

	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	if (flush_code && !info->in_rdrsp_wr)
		qp->sq_flush = (info->sq) ? true : false;

	return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
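
/*
 * Illustrative sketch, not part of the driver: the terminate payload
 * built above is the fixed header followed immediately by the headers
 * copied from the offending packet, which is why the memcpy targets
 * termhdr + 1:
 *
 *	| struct i40iw_terminate_hdr | copied DDP/RDMAP headers |
 *	|<----- sizeof(*termhdr) --->|<------- copy_len ------->|
 *
 * The returned length, sizeof(struct i40iw_terminate_hdr) + copy_len,
 * is what i40iw_terminate_connection passes to i40iw_term_modify_qp
 * as the terminate message length.
 */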

/**
 * i40iw_terminate_send_fin() - Send fin for terminate message
 * @qp: qp associated with received terminate AE
 */
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
{
	/* Send the fin only */
	i40iw_term_modify_qp(qp,
			     I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_FIN_ONLY,
			     0);
}

/**
 * i40iw_terminate_connection() - Handle a bad AE and send a terminate to the remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 termlen = 0;

	if (qp->term_flags & I40IW_TERM_SENT)
		return;         /* Sanity check */

	/* Eventtype can change from bld_terminate_hdr */
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termlen = i40iw_bld_terminate_hdr(qp, info);
	i40iw_terminate_start_timer(qp);
	qp->term_flags |= I40IW_TERM_SENT;
	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
}

/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	u32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (u32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		i40iw_terminate_done(qp, 0);
	} else {
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}
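
/*
 * Illustrative sketch, not part of the driver: the validation above
 * walks the received terminate 32 bits at a time, starting at the MPA
 * length field:
 *
 *	mpa[0]	MPA length plus the DDP and RDMAP control bytes
 *	mpa[2]	DDP queue number - must be 2 (the terminate queue)
 *	mpa[3]	DDP MSN          - must be 1 (first message)
 *	mpa[4]	DDP MO           - must be 0 (no offset)
 *
 * The first mismatch selects an asynchronous event id, and the bad
 * terminate is answered with a locally built one via
 * i40iw_terminate_connection.
 */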

/**
 * i40iw_hw_stat_init - Initialize HW stats table
 * @devstat: pestat struct
 * @fcn_idx: PCI fn id
 * @hw: PF i40iw_hw structure.
 * @is_pf: Is it a PF?
 *
 * Populate the HW stats table with the register offset addr for each
 * stat and snapshot the current counter values as the baseline for
 * the periodic stats reads.
 */
static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
			       u8 fcn_idx,
			       struct i40iw_hw *hw, bool is_pf)
{
	u32 stat_reg_offset;
	u32 stat_index;
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;

	devstat->hw = hw;

	if (is_pf) {
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_PFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
				I40E_GLPES_PFIP6TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_PFRDMAVINVLO(fcn_idx);
	} else {
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
				I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
				I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
				I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
				I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
				I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
				I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
				I40E_GLPES_VFTCPRTXSEG(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
				I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
				I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);

		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
				I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
				I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
				I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
				I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
				I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
				I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
				I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
				I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
				I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
				I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
				I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
				I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
				I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
				I40E_GLPES_VFIP6TXMCPKTSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
				I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
				I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
				I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
				I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
				I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
				I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
				I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
				I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
				I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
				I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
				I40E_GLPES_VFRDMAVINVLO(fcn_idx);
	}

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_64[stat_index];
		last_rd_stats->stat_value_64[stat_index] =
			readq(devstat->hw->hw_addr + stat_reg_offset);
	}

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++) {
		stat_reg_offset = stat_table->stat_offset_32[stat_index];
		last_rd_stats->stat_value_32[stat_index] =
			i40iw_rd32(devstat->hw, stat_reg_offset);
	}
}

/**
 * i40iw_hw_stat_read_32 - Read 32-bit HW stat counter and accommodate for roll-over
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 */
static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_32b index,
				  u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_32[index];

	new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_32[index])
		hw_stats->stat_value_32[index] += new_stat_value;
	else
		hw_stats->stat_value_32[index] +=
			new_stat_value - last_rd_stats->stat_value_32[index];
	last_rd_stats->stat_value_32[index] = new_stat_value;
	*value = hw_stats->stat_value_32[index];
}
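
/*
 * Illustrative sketch, not part of the driver: worked roll-over example
 * for the read above. If the last read latched 0xfffffff0 and the
 * register now reads 0x00000010, the counter wrapped:
 *
 *	new_stat_value (0x10) < last read value (0xfffffff0)
 *	hw_stats->stat_value_32[index] += 0x10;	// post-wrap count
 *
 * otherwise the normal path adds the delta new_stat_value minus the
 * last read value. Either way the 64-bit running total keeps growing
 * monotonically across 32-bit register wraps.
 */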

/**
 * i40iw_hw_stat_read_64 - Read 64-bit HW stat counter and accommodate for roll-over
 * @devstat: pestat struct
 * @index: index in HW stat table which contains offset reg-addr
 * @value: hw stat value
 */
static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
				  enum i40iw_hw_stat_index_64b index,
				  u64 *value)
{
	struct i40iw_dev_hw_stat_offsets *stat_table =
		&devstat->hw_stat_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
	u64 new_stat_value = 0;
	u32 stat_reg_offset = stat_table->stat_offset_64[index];

	new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
	/* roll-over case */
	if (new_stat_value < last_rd_stats->stat_value_64[index])
		hw_stats->stat_value_64[index] += new_stat_value;
	else
		hw_stats->stat_value_64[index] +=
			new_stat_value - last_rd_stats->stat_value_64[index];
	last_rd_stats->stat_value_64[index] = new_stat_value;
	*value = hw_stats->stat_value_64[index];
}

/**
 * i40iw_hw_stat_read_all - read all HW stat counters
 * @devstat: pestat struct
 * @stat_values: hw stats structure
 *
 * Read all the HW stat counters and populate the hw_stats structure
 * of the passed-in dev's pestat as well as the copy in stat_values.
 */
static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
				   struct i40iw_dev_hw_stats *stat_values)
{
	u32 stat_index;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index,
				      &stat_values->stat_value_32[stat_index]);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index,
				      &stat_values->stat_value_64[stat_index]);
}
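
/*
 * Illustrative usage sketch, not part of the driver, assuming the
 * caller serializes with the periodic stats timer via the stats_lock
 * initialized in i40iw_device_init:
 *
 *	struct i40iw_dev_hw_stats stats;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
 *	dev->dev_pestat.ops.iw_hw_stat_read_all(&dev->dev_pestat, &stats);
 *	spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
 *
 * after which stats holds a roll-over-adjusted snapshot of every 32-
 * and 64-bit counter.
 */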

/**
 * i40iw_hw_stat_refresh_all - Update all HW stat structs
 * @devstat: pestat struct
 *
 * Read all the HW stat counters to refresh the values in the hw_stats
 * structure of the passed-in dev's pestat
 */
static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
{
	u64 stat_value;
	u32 stat_index;

	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stat_index++)
		i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stat_index++)
		i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
}

static struct i40iw_cqp_ops iw_cqp_ops = {
	i40iw_sc_cqp_init,
	i40iw_sc_cqp_create,
	i40iw_sc_cqp_post_sq,
	i40iw_sc_cqp_get_next_send_wqe,
	i40iw_sc_cqp_destroy,
	i40iw_sc_poll_for_cqp_op_done
};

static struct i40iw_ccq_ops iw_ccq_ops = {
	i40iw_sc_ccq_init,
	i40iw_sc_ccq_create,
	i40iw_sc_ccq_destroy,
	i40iw_sc_ccq_create_done,
	i40iw_sc_ccq_get_cqe_info,
	i40iw_sc_ccq_arm
};

static struct i40iw_ceq_ops iw_ceq_ops = {
	i40iw_sc_ceq_init,
	i40iw_sc_ceq_create,
	i40iw_sc_cceq_create_done,
	i40iw_sc_cceq_destroy_done,
	i40iw_sc_cceq_create,
	i40iw_sc_ceq_destroy,
	i40iw_sc_process_ceq
};

static struct i40iw_aeq_ops iw_aeq_ops = {
	i40iw_sc_aeq_init,
	i40iw_sc_aeq_create,
	i40iw_sc_aeq_destroy,
	i40iw_sc_get_next_aeqe,
	i40iw_sc_repost_aeq_entries,
	i40iw_sc_aeq_create_done,
	i40iw_sc_aeq_destroy_done
};

static struct i40iw_pd_ops iw_pd_ops = {
	i40iw_sc_pd_init,
};

static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	i40iw_sc_qp_init,
	i40iw_sc_qp_create,
	i40iw_sc_qp_modify,
	i40iw_sc_qp_destroy,
	i40iw_sc_qp_flush_wqes,
	i40iw_sc_qp_upload_context,
	i40iw_sc_qp_setctx,
	i40iw_sc_send_lsmm,
	i40iw_sc_send_lsmm_nostag,
	i40iw_sc_send_rtt,
	i40iw_sc_post_wqe0,
};

static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	i40iw_sc_cq_init,
	i40iw_sc_cq_create,
	i40iw_sc_cq_destroy,
	i40iw_sc_cq_modify,
};

static struct i40iw_mr_ops iw_mr_ops = {
	i40iw_sc_alloc_stag,
	i40iw_sc_mr_reg_non_shared,
	i40iw_sc_mr_reg_shared,
	i40iw_sc_dealloc_stag,
	i40iw_sc_query_stag,
	i40iw_sc_mw_alloc
};

static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	i40iw_sc_manage_push_page,
	i40iw_sc_manage_hmc_pm_func_table,
	i40iw_sc_set_hmc_resource_profile,
	i40iw_sc_commit_fpm_values,
	i40iw_sc_query_fpm_values,
	i40iw_sc_static_hmc_pages_allocated,
	i40iw_sc_add_arp_cache_entry,
	i40iw_sc_del_arp_cache_entry,
	i40iw_sc_query_arp_cache_entry,
	i40iw_sc_manage_apbvt_entry,
	i40iw_sc_manage_qhash_table_entry,
	i40iw_sc_alloc_local_mac_ipaddr_entry,
	i40iw_sc_add_local_mac_ipaddr_entry,
	i40iw_sc_del_local_mac_ipaddr_entry,
	i40iw_sc_cqp_nop,
	i40iw_sc_commit_fpm_values_done,
	i40iw_sc_query_fpm_values_done,
	i40iw_sc_manage_hmc_pm_func_table_done,
	i40iw_sc_suspend_qp,
	i40iw_sc_resume_qp
};

static struct i40iw_hmc_ops iw_hmc_ops = {
	i40iw_sc_init_iw_hmc,
	i40iw_sc_parse_fpm_query_buf,
	i40iw_sc_configure_iw_fpm,
	i40iw_sc_parse_fpm_commit_buf,
	i40iw_sc_create_hmc_obj,
	i40iw_sc_del_hmc_obj,
	NULL,
	NULL
};

static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
	i40iw_hw_stat_init,
	i40iw_hw_stat_read_32,
	i40iw_hw_stat_read_64,
	i40iw_hw_stat_read_all,
	i40iw_hw_stat_refresh_all
};

/**
 * i40iw_device_init_pestat - Initialize the pestat structure
 * @devstat: pestat struct
 */
enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
{
	devstat->ops = iw_device_pestat_ops;
	return 0;
}
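
/*
 * Illustrative sketch, not part of the driver: after this point every
 * stats entry point is reached through the per-device ops copy rather
 * than the static functions above, e.g.:
 *
 *	u64 discards;
 *
 *	devstat->ops.iw_hw_stat_read_32(devstat,
 *					I40IW_HW_STAT_INDEX_IP4RXDISCARD,
 *					&discards);
 *
 * which is also how i40iw_device_init below reaches iw_hw_stat_init
 * for the PF.
 */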

/**
 * i40iw_device_init - Initialize IWARP device
 * @dev: IWARP device pointer
 * @info: IWARP init info
 */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
					 struct i40iw_device_init_info *info)
{
	u32 val;
	u32 vchnl_ver = 0;
	u16 hmc_fcn = 0;
	enum i40iw_status_code ret_code = 0;
	u8 db_size;

	spin_lock_init(&dev->cqp_lock);
	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for the cqp commands backlog. */

	i40iw_device_init_uk(&dev->dev_uk);

	dev->debug_mask = info->debug_mask;

	ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_DEV,
			    "%s: i40iw_device_init_pestat failed\n", __func__);
		return ret_code;
	}
	dev->hmc_fn_id = info->hmc_fn_id;
	dev->qs_handle = info->qs_handle;
	dev->exception_lan_queue = info->exception_lan_queue;
	dev->is_pf = info->is_pf;

	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;

	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;

	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;

	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);

	if (dev->is_pf) {
		dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
			dev->hmc_fn_id, dev->hw, true);
		spin_lock_init(&dev->dev_pestat.stats_lock);
		/* start the periodic stats_timer */
		i40iw_hw_stats_start_timer(dev);
		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
		    (db_size != I40IW_PE_DB_SIZE_8M)) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
				    __func__, val);
			ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
			return ret_code;
		}
		dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
	} else {
		dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
	}

	dev->cqp_ops = &iw_cqp_ops;
	dev->ccq_ops = &iw_ccq_ops;
	dev->ceq_ops = &iw_ceq_ops;
	dev->aeq_ops = &iw_aeq_ops;
	dev->cqp_misc_ops = &iw_cqp_misc_ops;
	dev->iw_pd_ops = &iw_pd_ops;
	dev->iw_priv_qp_ops = &iw_priv_qp_ops;
	dev->iw_priv_cq_ops = &iw_priv_cq_ops;
	dev->mr_ops = &iw_mr_ops;
	dev->hmc_ops = &iw_hmc_ops;
	dev->vchnl_if.vchnl_send = info->vchnl_send;
	if (dev->vchnl_if.vchnl_send)
		dev->vchnl_up = true;
	else
		dev->vchnl_up = false;
	if (!dev->is_pf) {
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
		ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
		if (!ret_code) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: Get Channel version rc = 0x%0x, version is %u\n",
				    __func__, ret_code, vchnl_ver);
			ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
			if (!ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_DEV,
					    "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
					    __func__, ret_code, hmc_fcn);
				dev->hmc_fn_id = (u8)hmc_fcn;
			}
		}
	}
	dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;