/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include "i40iw.h"

/**
 * i40iw_initialize_hw_resources - initialize hw resource during open
 * @iwdev: iwarp device
 */
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
{
	unsigned long num_pds;
	u32 resources_size;
	u32 max_mr;
	u32 max_qp;
	u32 max_cq;
	u32 arp_table_size;
	u32 mrdrvbits;
	void *resource_ptr;

	max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
	max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
	iwdev->max_cqe = 0xFFFFF;
	num_pds = I40IW_MAX_PDS;
	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
	resources_size += sizeof(struct i40iw_qp **) * max_qp;
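
	/*
	 * All of the per-resource tables below live in one backing
	 * allocation: the ARP table first, then one allocation bitmap per
	 * resource type (QPs, MRs, CQs, PDs, ARP entries), and finally the
	 * QP pointer table. The pointer setup that follows simply carves
	 * up this single region.
	 */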
	iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);

	if (!iwdev->mem_resources)
		return -ENOMEM;

	iwdev->max_qp = max_qp;
	iwdev->max_mr = max_mr;
	iwdev->max_cq = max_cq;
	iwdev->max_pd = num_pds;
	iwdev->arp_table_size = arp_table_size;
	iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
	resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);

	iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
	    IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;

	iwdev->allocated_qps = resource_ptr;
	iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
	iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
	iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
	iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
	iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
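
	/* Mark index 0 in each bitmap in-use so resource id 0 is never handed out */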
	set_bit(0, iwdev->allocated_mrs);
	set_bit(0, iwdev->allocated_qps);
	set_bit(0, iwdev->allocated_cqs);
	set_bit(0, iwdev->allocated_pds);
	set_bit(0, iwdev->allocated_arps);

	/* Following for ILQ/IEQ */
	set_bit(1, iwdev->allocated_qps);
	set_bit(1, iwdev->allocated_cqs);
	set_bit(1, iwdev->allocated_pds);
	set_bit(2, iwdev->allocated_cqs);
	set_bit(2, iwdev->allocated_pds);

	spin_lock_init(&iwdev->resource_lock);
	spin_lock_init(&iwdev->qptable_lock);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
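	/*
	 * Worked example (illustrative): with max_mr = 65536,
	 * get_count_order() returns 16, so mrdrvbits = 24 - 16 = 8 and the
	 * mask below is ~(0xFF << 24) = 0x00FFFFFF, leaving the top 8 bits
	 * of the stag to the driver.
	 */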
	iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
	return 0;
}

/**
 * i40iw_cqp_ce_handler - handle cqp completions
 * @iwdev: iwarp device
 * @cq: cq for cqp completions
 * @arm: flag to arm after completions
 */
static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
{
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u32 cqe_count = 0;
	struct i40iw_ccq_cqe_info info;
	int ret;

	do {
		memset(&info, 0, sizeof(info));
		ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
		if (ret)
			break;
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
		if (info.error)
			i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				     info.op_code, info.maj_err_code, info.min_err_code);
		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;

			if (cqp_request->waiting) {
				cqp_request->request_done = true;
				wake_up(&cqp_request->waitq);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request, 1);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			}
		}

		cqe_count++;
	} while (1);
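
	/* Re-arm the CCQ for more interrupts only if completions were polled */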
	if (arm && cqe_count) {
		i40iw_process_bh(dev);
		dev->ccq_ops->ccq_arm(cq);
	}
}

/**
 * i40iw_iwarp_ce_handler - handle iwarp completions
 * @iwdev: iwarp device
 * @iwcq: iwarp cq receiving event
 */
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
				   struct i40iw_sc_cq *iwcq)
{
	struct i40iw_cq *i40iwcq = iwcq->back_cq;

	if (i40iwcq->ibcq.comp_handler)
		i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
					   i40iwcq->ibcq.cq_context);
}

/**
 * i40iw_puda_ce_handler - handle puda completion events
 * @iwdev: iwarp device
 * @cq: puda completion q for event
 */
static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
				  struct i40iw_sc_cq *cq)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
	enum i40iw_status_code status;
	u32 compl_error;

	do {
		status = i40iw_puda_poll_completion(dev, cq, &compl_error);
		if (status == I40IW_ERR_QUEUE_EMPTY)
			break;
		if (status) {
			i40iw_pr_err("puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			i40iw_pr_err("puda compl_err = 0x%x\n", compl_error);
			break;
		}
	} while (1);

	dev->ccq_ops->ccq_arm(cq);
}
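
/*
 * Note: the ILQ and IEQ completion queues routed here belong to the PUDA
 * (privileged UDA) queues the driver uses internally, for incoming
 * connection requests and iWARP exception handling, respectively.
 */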

/**
 * i40iw_process_ceq - handle ceq for completions
 * @iwdev: iwarp device
 * @ceq: ceq having cq for completion
 */
void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_ceq *sc_ceq;
	struct i40iw_sc_cq *cq;
	bool arm = true;

	sc_ceq = &ceq->sc_ceq;
	do {
		cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
		if (!cq)
			break;

		if (cq->cq_type == I40IW_CQ_TYPE_CQP)
			i40iw_cqp_ce_handler(iwdev, cq, arm);
		else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
			i40iw_iwarp_ce_handler(iwdev, cq);
		else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
			 (cq->cq_type == I40IW_CQ_TYPE_IEQ))
			i40iw_puda_ce_handler(iwdev, cq);
	} while (1);
}

/**
 * i40iw_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: del hash
 * @term: term message
 * @termlen: length of term message
 */
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
			 u8 state,
			 u8 del_hash,
			 u8 term,
			 u8 termlen)
{
	struct i40iw_modify_qp_info info;

	memset(&info, 0, sizeof(info));
	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.dont_send_term = true;
	info.dont_send_fin = true;
	info.termlen = termlen;

	if (term & I40IWQP_TERM_SEND_TERM_ONLY)
		info.dont_send_term = false;
	if (term & I40IWQP_TERM_SEND_FIN_ONLY)
		info.dont_send_fin = false;
	if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
		info.reset_tcp_conn = true;
	iwqp->hw_iwarp_state = state;
	i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
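
/*
 * Typical call (illustrative), as used by the AEQ handler below on
 * LLP_FIN_RECEIVED to move a QP to CLOSING without sending a TERM or FIN:
 *
 *	i40iw_next_iw_state(iwqp, I40IW_QP_STATE_CLOSING, 0, 0, 0);
 */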

/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
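
		/*
		 * An AE either names a QP (info->qp set, id in
		 * info->qp_cq_id) or, for CQ errors, carries the CQ
		 * pointer in info->compl_ctx.
		 */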
		if (info->qp) {
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}

			i40iw_qp_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
				    (iwqp->ibqp_state == IB_QPS_RTS)) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_BAD_CLOSE:
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
			break;
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
			ctx_info->err_rq_idx_valid = false;
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_qp_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}

/**
 * i40iw_cqp_manage_abvpt_cmd - send cqp command manage apbvt
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
static enum i40iw_status_code
i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev,
			   u16 accel_local_port,
			   bool add_port)
{
	struct i40iw_apbvt_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;

	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = cpu_to_le16(accel_local_port);

	cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage APBVT entry fail");

	return status;
}
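
/*
 * Every CQP command in this file follows the pattern above: obtain a
 * cqp_request (waiting or fire-and-forget), fill cqp_info with the opcode
 * and a scratch value (the request pointer, echoed back in the CQE), post
 * it with i40iw_handle_cqp_op(), and let i40iw_cqp_ce_handler() complete
 * or release the request.
 */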

/**
 * i40iw_manage_apbvt - add or delete tcp port
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
					  u16 accel_local_port,
					  bool add_port)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	enum i40iw_status_code status;
	unsigned long flags;
	bool in_use;

	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
	 * protect against race where add APBVT CQP can race ahead of the delete
	 * APBVT for same port.
	 */
	if (add_port) {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = __test_and_set_bit(accel_local_port,
					    cm_core->ports_in_use);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		if (in_use)
			return 0;
		return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						  true);
	} else {
		spin_lock_irqsave(&cm_core->apbvt_lock, flags);
		in_use = i40iw_port_in_use(cm_core, accel_local_port);
		if (in_use) {
			spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
			return 0;
		}
		__clear_bit(accel_local_port, cm_core->ports_in_use);
		status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
						    false);
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return status;
	}
}

/**
 * i40iw_manage_arp_cache - manage hw arp cache
 * @iwdev: iwarp device
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4 when true
 * @action: add, delete or modify
 */
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
			    unsigned char *mac_addr,
			    u32 *ip_addr,
			    bool ipv4,
			    u32 action)
{
	struct i40iw_add_arp_cache_entry_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	int arp_index;

	arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
	if (arp_index == -1)
		return;
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == I40IW_ARP_ADD) {
		cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = cpu_to_le16((u16)arp_index);
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->post_sq = 1;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
}

/**
 * i40iw_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 * @send_ack: flag send ack
 */
static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
{
	i40iw_send_syn(cqp_request->param, send_ack);
}
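
/*
 * The callback above is wired up by i40iw_manage_qhash() below via
 * cqp_request->callback_fcn whenever a cm_node is supplied, so the
 * SYN/ACK goes out only after the qhash entry is in place.
 */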

/**
 * i40iw_manage_qhash - add or modify qhash
 * @iwdev: iwarp device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait)
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id != 0xFFFF) {
		info->vlan_valid = true;
		info->vlan_id = cpu_to_le16(cminfo->vlan_id);
	} else {
		info->vlan_valid = false;
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
	info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
	info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
	if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		info->src_port = cpu_to_le16(cminfo->rem_port);
		info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
		info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
		info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
		info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
	}
	if (cmnode) {
		cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
		cqp_request->param = (void *)cmnode;
	}

	if (info->ipv4_valid)
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	else
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
	return status;
}
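
/*
 * Illustrative call (hypothetical values): remove a listener's SYN qhash
 * entry, waiting for the CQP operation to complete:
 *
 *	i40iw_manage_qhash(iwdev, cminfo, I40IW_QHASH_TYPE_TCP_SYN,
 *			   I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, true);
 */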

/**
 * i40iw_hw_flush_wqes - flush qp's wqe
 * @iwdev: iwarp device
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
					   struct i40iw_sc_qp *qp,
					   struct i40iw_qp_flush_info *info,
					   bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_qp_flush_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Flush WQE's fail");
		complete(&iwqp->sq_drained);
		complete(&iwqp->rq_drained);
		return status;
	}
	if (!cqp_request->compl_info.maj_err_code) {
		switch (cqp_request->compl_info.min_err_code) {
		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
			complete(&iwqp->sq_drained);
			break;
		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
			complete(&iwqp->rq_drained);
			break;
		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
			break;
		default:
			complete(&iwqp->sq_drained);
			complete(&iwqp->rq_drained);
			break;
		}
	}

	return 0;
}
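
/*
 * The sq_drained/rq_drained completions signalled above are what the
 * drain verbs (i40iw_drain_sq()/i40iw_drain_rq() in i40iw_verbs.c) wait
 * on to know the flush has finished.
 */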

/**
 * i40iw_gen_ae - generate AE
 * @iwdev: iwarp device
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void i40iw_gen_ae(struct i40iw_device *iwdev,
		  struct i40iw_sc_qp *qp,
		  struct i40iw_gen_ae_info *info,
		  bool wait)
{
	struct i40iw_gen_ae_info *ae_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	ae_info = &cqp_request->info.in.u.gen_ae.info;
	memcpy(ae_info, info, sizeof(*ae_info));

	cqp_info->cqp_cmd = OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n",
			     info->ae_code);
}

/**
 * i40iw_hw_manage_vf_pble_bp - manage vf pbles
 * @iwdev: iwarp device
 * @info: info for managing pble
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
						  struct i40iw_manage_vf_pble_info *info,
						  bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_manage_vf_pble_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	if ((iwdev->init_state < CCQ_CREATED) && wait)
		wait = false;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
	return status;
}

/**
 * i40iw_get_ib_wc - convert an iwarp flush code to an IB wc status
 * @opcode: iwarp flush code
 */
static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
{
	switch (opcode) {
	case FLUSH_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case FLUSH_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;
	case FLUSH_LOC_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case FLUSH_GENERAL_ERR:
		return IB_WC_GENERAL_ERR;
	case FLUSH_FATAL_ERR:
	default:
		return IB_WC_FATAL_ERR;
	}
}

/**
 * i40iw_set_flush_info - set flush info
 * @pinfo: flush info to populate
 * @min: minor error code to set
 * @maj: major error code to set
 * @opcode: flush error code
 */
static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
				 u16 *min,
				 u16 *maj,
				 enum i40iw_flush_opcode opcode)
{
	*min = (u16)i40iw_get_ib_wc(opcode);
	*maj = CQE_MAJOR_DRV;
	pinfo->userflushcode = true;
}

/**
 * i40iw_flush_wqes - flush wqe for qp
 * @iwdev: iwarp device
 * @iwqp: qp to flush wqes
 */
void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_qp_flush_info *pinfo = &info;

	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	memset(pinfo, 0, sizeof(*pinfo));
	info.sq = true;
	info.rq = true;
	if (qp->term_flags) {
		i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
				     &pinfo->sq_major_code, qp->flush_code);
		i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
				     &pinfo->rq_major_code, qp->flush_code);
	}

	(void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
}