// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
static struct irdma_rsrc_limits rsrc_limits_table[] = {

/* types of hmc objects */
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
	IRDMA_HMC_IW_APBVT_ENTRY,
	IRDMA_HMC_IW_OOISCFFL,
};
/**
 * irdma_iwarp_ce_handler - handle iwarp completions
 * @iwcq: iwarp cq receiving event
 */
static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
	struct irdma_cq *cq = iwcq->back_cq;

	atomic_set(&cq->armed, 0);
	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
/**
 * irdma_puda_ce_handler - handle puda completion events
 * @rf: RDMA PCI function
 * @cq: puda completion q for event
 */
static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
				  struct irdma_sc_cq *cq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 compl_error;
	int status;

	do {
		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
		if (status == -ENOENT)
			break;
		if (status) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
				  compl_error);
			break;
		}
	} while (1);

	irdma_sc_ccq_arm(cq);
}
/**
 * irdma_process_ceq - handle ceq for completions
 * @rf: RDMA PCI function
 * @ceq: ceq having cq for completion
 */
static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_sc_ceq *sc_ceq;
	struct irdma_sc_cq *cq;
	unsigned long flags;

	sc_ceq = &ceq->sc_ceq;
	do {
		spin_lock_irqsave(&ceq->ce_lock, flags);
		cq = irdma_sc_process_ceq(dev, sc_ceq);
		if (!cq) {
			spin_unlock_irqrestore(&ceq->ce_lock, flags);
			break;
		}

		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
			irdma_iwarp_ce_handler(cq);

		spin_unlock_irqrestore(&ceq->ce_lock, flags);

		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
			irdma_puda_ce_handler(rf, cq);
	} while (1);
}
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
				   struct irdma_aeqe_info *info)
{
	qp->sq_flush_code = info->sq;
	qp->rq_flush_code = info->rq;
	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;

	switch (info->ae_id) {
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
	case IRDMA_AE_AMP_INVALID_STAG:
	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
	case IRDMA_AE_AMP_UNALLOCATED_STAG:
	case IRDMA_AE_AMP_BAD_PD:
	case IRDMA_AE_AMP_BAD_QP:
	case IRDMA_AE_AMP_BAD_STAG_KEY:
	case IRDMA_AE_AMP_BAD_STAG_INDEX:
	case IRDMA_AE_AMP_TO_WRAP:
	case IRDMA_AE_PRIV_OPERATION_DENIED:
		qp->flush_code = FLUSH_PROT_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_UDA_XMIT_BAD_PD:
	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case IRDMA_AE_UDA_L4LEN_INVALID:
	case IRDMA_AE_DDP_UBE_INVALID_MO:
	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->flush_code = FLUSH_LOC_LEN_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
		qp->flush_code = FLUSH_REM_ACCESS_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
	case IRDMA_AE_IB_REMOTE_OP_ERROR:
		qp->flush_code = FLUSH_REM_OP_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_LCE_QP_CATASTROPHIC:
		qp->flush_code = FLUSH_FATAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		qp->flush_code = FLUSH_GENERAL_ERR;
		break;
	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		qp->flush_code = FLUSH_RETRY_EXC_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
	case IRDMA_AE_AMP_MWBIND_VALID_STAG:
		qp->flush_code = FLUSH_MW_BIND_ERR;
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		break;
	case IRDMA_AE_IB_INVALID_REQUEST:
		qp->flush_code = FLUSH_REM_INV_REQ_ERR;
		qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
		break;
	default:
		qp->flush_code = FLUSH_GENERAL_ERR;
		qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
		break;
	}
}
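/*
 * AEQ entries are consumed in a loop below: each entry is decoded into an
 * irdma_aeqe_info, the affected QP or CQ is looked up under its table lock
 * and reference counted, and the processed entries are reposted to the
 * hardware in one batch (aeqcnt) once the queue has been drained.
 */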
/**
 * irdma_process_aeq - handle aeq events
 * @rf: RDMA PCI function
 */
static void irdma_process_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct irdma_aeqe_info aeinfo;
	struct irdma_aeqe_info *info = &aeinfo;
	int ret;
	struct irdma_qp *iwqp = NULL;
	struct irdma_cq *iwcq = NULL;
	struct irdma_sc_qp *qp = NULL;
	struct irdma_qp_host_ctx_info *ctx_info = NULL;
	struct irdma_device *iwdev = rf->iwdev;
	unsigned long flags;
	u32 aeqcnt = 0;

	do {
		memset(info, 0, sizeof(*info));
		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		ibdev_dbg(&iwdev->ibdev,
			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
			  info->iwarp_state, info->ae_src);

		if (info->qp) {
			spin_lock_irqsave(&rf->qptable_lock, flags);
			iwqp = rf->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&rf->qptable_lock,
						       flags);
				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
					wake_up(&iwdev->suspend_wq);
					continue;
				}
				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
					  info->qp_cq_id);
				continue;
			}
			irdma_qp_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&rf->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
				iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
		} else {
			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
				continue;
		}

		switch (info->ae_id) {
		struct irdma_cm_node *cm_node;
		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
			cm_node = iwqp->cm_node;
			if (cm_node->accept_pend) {
				atomic_dec(&cm_node->listener->pend_accepts_cnt);
				cm_node->accept_pend = 0;
			}
			iwqp->rts_ae_rcvd = 1;
			wake_up_interruptible(&iwqp->waitq);
			break;
		case IRDMA_AE_LLP_FIN_RECEIVED:
		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
				if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
				    iwqp->ibqp_state == IB_QPS_RTS) {
					irdma_next_iw_state(iwqp,
							    IRDMA_QP_STATE_CLOSING,
							    0, 0, 0);
					irdma_cm_disconn(iwqp);
				}
				irdma_schedule_cm_timer(iwqp->cm_node,
							(struct irdma_puda_buf *)iwqp,
							IRDMA_TIMER_TYPE_CLOSE,
							1, 0);
			}
			break;
		case IRDMA_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				irdma_terminate_done(qp, 0);
			else
				irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_BAD_CLOSE:
		case IRDMA_AE_RESET_SENT:
			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
					    0);
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			irdma_cm_disconn(iwqp);
			break;
		case IRDMA_AE_QP_SUSPEND_COMPLETE:
			if (iwqp->iwdev->vsi.tc_change_pending) {
				if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
					wake_up(&iwqp->iwdev->suspend_wq);
			}
			if (iwqp->suspend_pending) {
				iwqp->suspend_pending = false;
				wake_up(&iwqp->iwdev->suspend_wq);
			}
			break;
		case IRDMA_AE_TERMINATE_SENT:
			irdma_terminate_send_fin(qp);
			break;
		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
			irdma_terminate_received(qp, info);
			break;
		case IRDMA_AE_CQ_OPERATION_ERROR:
			ibdev_err(&iwdev->ibdev,
				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
				  info->ae_id);

			spin_lock_irqsave(&rf->cqtable_lock, flags);
			iwcq = rf->cq_table[info->qp_cq_id];
			if (!iwcq) {
				spin_unlock_irqrestore(&rf->cqtable_lock,
						       flags);
				ibdev_dbg(to_ibdev(dev),
					  "cq_id %d is already freed\n", info->qp_cq_id);
				continue;
			}
			irdma_cq_add_ref(&iwcq->ibcq);
			spin_unlock_irqrestore(&rf->cqtable_lock, flags);

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent,
							 iwcq->ibcq.cq_context);
			}
			irdma_cq_rem_ref(&iwcq->ibcq);
			break;
		case IRDMA_AE_RESET_NOT_SENT:
		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
		case IRDMA_AE_RESOURCE_EXHAUSTION:
			break;
		case IRDMA_AE_PRIV_OPERATION_DENIED:
		case IRDMA_AE_STAG_ZERO_INVALID:
		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
		case IRDMA_AE_DDP_UBE_INVALID_MO:
		case IRDMA_AE_DDP_UBE_INVALID_QN:
		case IRDMA_AE_DDP_NO_L_BIT:
		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case IRDMA_AE_INVALID_ARP_ENTRY:
		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
		case IRDMA_AE_STALE_ARP_ENTRY:
		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
		case IRDMA_AE_LLP_SYN_RECEIVED:
		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
		case IRDMA_AE_LCE_QP_CATASTROPHIC:
		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
		case IRDMA_AE_LLP_TOO_MANY_RNRS:
		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
		default:
			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
				  info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
			if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
				ctx_info->roce_info->err_rq_idx_valid = info->rq;
				if (info->rq) {
					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
								ctx_info);
				}
				irdma_set_flush_fields(qp, info);
				irdma_cm_disconn(iwqp);
				break;
			}
			ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
			if (info->rq) {
				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = true;
				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
						   ctx_info);
			}
			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
				irdma_cm_disconn(iwqp);
			} else {
				irdma_terminate_connection(qp, info);
			}
			break;
		}
		if (info->qp)
			irdma_qp_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		irdma_sc_repost_aeq_entries(dev, aeqcnt);
}
/**
 * irdma_ena_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
{
	dev->irq_ops->irdma_en_irq(dev, msix_id);
}

/**
 * irdma_dpc - tasklet for aeq and ceq 0
 * @t: tasklet_struct ptr
 */
static void irdma_dpc(struct tasklet_struct *t)
{
	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);

	if (rf->msix_shared)
		irdma_process_ceq(rf, rf->ceqlist);
	irdma_process_aeq(rf);
	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
}

/**
 * irdma_ceq_dpc - dpc handler for CEQ
 * @t: tasklet_struct ptr
 */
static void irdma_ceq_dpc(struct tasklet_struct *t)
{
	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
	struct irdma_pci_f *rf = iwceq->rf;

	irdma_process_ceq(rf, iwceq);
	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
}
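/*
 * MSI-X vector 0 always services the AEQ. When the vector count does not
 * exceed the number of online CPUs (rf->msix_shared), CEQ 0 shares that
 * vector; otherwise each CEQ gets its own vector and the usable vector
 * count is capped at num_online_cpus() + 1.
 */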
/**
 * irdma_save_msix_info - copy msix vector information to iwarp device
 * @rf: RDMA PCI function
 *
 * Allocate iwdev msix table and copy the msix info to the table
 * Return 0 if successful, otherwise return error
 */
static int irdma_save_msix_info(struct irdma_pci_f *rf)
{
	struct irdma_qvlist_info *iw_qvlist;
	struct irdma_qv_info *iw_qvinfo;
	struct msix_entry *pmsix;
	u32 ceq_idx;
	u32 i;
	size_t size;

	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!rf->iw_msixtbl)
		return -ENOMEM;

	rf->iw_qvlist = (struct irdma_qvlist_info *)
			(&rf->iw_msixtbl[rf->msix_count]);
	iw_qvlist = rf->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = rf->msix_count;
	if (rf->msix_count <= num_online_cpus())
		rf->msix_shared = true;
	else if (rf->msix_count > num_online_cpus() + 1)
		rf->msix_count = num_online_cpus() + 1;

	pmsix = rf->msix_entries;
	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
		rf->iw_msixtbl[i].idx = pmsix->entry;
		rf->iw_msixtbl[i].irq = pmsix->vector;
		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (!i) {
			iw_qvinfo->aeq_idx = 0;
			if (rf->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
		pmsix++;
	}

	return 0;
}
/**
 * irdma_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: RDMA PCI function
 */
static irqreturn_t irdma_irq_handler(int irq, void *data)
{
	struct irdma_pci_f *rf = data;

	tasklet_schedule(&rf->dpc_tasklet);

	return IRQ_HANDLED;
}

/**
 * irdma_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t irdma_ceq_handler(int irq, void *data)
{
	struct irdma_ceq *iwceq = data;

	if (iwceq->irq != irq)
		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
			  iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);

	return IRQ_HANDLED;
}
/**
 * irdma_destroy_irq - destroy device interrupts
 * @rf: RDMA PCI function
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void irdma_destroy_irq(struct irdma_pci_f *rf,
			      struct irdma_msix_vector *msix_vec, void *dev_id)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;

	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
	irq_update_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
	if (rf == dev_id) {
		tasklet_kill(&rf->dpc_tasklet);
	} else {
		struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;

		tasklet_kill(&iwceq->dpc_tasklet);
	}
}
/**
 * irdma_destroy_cqp - destroy control qp
 * @rf: RDMA PCI function
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void irdma_destroy_cqp(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp *cqp = &rf->cqp;
	int status;

	status = irdma_sc_cqp_destroy(dev->cqp);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);

	irdma_cleanup_pending_cqp_op(rf);
	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
			  cqp->sq.pa);
	cqp->sq.va = NULL;
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}
static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq *aeq = &rf->aeq;
	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;

	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
	vfree(aeq->mem.va);
}
/**
 * irdma_destroy_aeq - destroy aeq
 * @rf: RDMA PCI function
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void irdma_destroy_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	int status = -EBUSY;

	if (!rf->msix_shared) {
		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
	}
	if (rf->reset)
		goto exit;

	aeq->sc_aeq.size = 0;
	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);

exit:
	if (aeq->virtual_map) {
		irdma_destroy_virt_aeq(rf);
	} else {
		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
				  aeq->mem.pa);
		aeq->mem.va = NULL;
	}
}
/**
 * irdma_destroy_ceq - destroy ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	if (rf->reset)
		goto exit;

	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
		goto exit;
	}

	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
			  status);
exit:
	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
			  iwceq->mem.pa);
	iwceq->mem.va = NULL;
}
/**
 * irdma_del_ceq_0 - destroy ceq 0
 * @rf: RDMA PCI function
 *
 * Disable the ceq 0 interrupt and destroy the ceq 0
 */
static void irdma_del_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = rf->ceqlist;
	struct irdma_msix_vector *msix_vec;

	if (rf->msix_shared) {
		msix_vec = &rf->iw_msixtbl[0];
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
						  msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, rf);
	} else {
		msix_vec = &rf->iw_msixtbl[1];
		irdma_destroy_irq(rf, msix_vec, iwceq);
	}

	irdma_destroy_ceq(rf, iwceq);
	rf->sc_dev.ceq_valid = false;
	rf->ceqs_count = 0;
}
/**
 * irdma_del_ceqs - destroy all ceq's except CEQ 0
 * @rf: RDMA PCI function
 *
 * Go through all of the device ceq's, except 0, and for each
 * ceq disable the ceq interrupt and destroy the ceq
 */
static void irdma_del_ceqs(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = &rf->ceqlist[1];
	struct irdma_msix_vector *msix_vec;
	u32 i;

	if (rf->msix_shared)
		msix_vec = &rf->iw_msixtbl[1];
	else
		msix_vec = &rf->iw_msixtbl[2];

	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
						  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, iwceq);
		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
				  IRDMA_OP_CEQ_DESTROY);
		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}
	rf->ceqs_count = 1;
}
/**
 * irdma_destroy_ccq - destroy control cq
 * @rf: RDMA PCI function
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void irdma_destroy_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq *ccq = &rf->ccq;
	int status = 0;

	if (rf->cqp_cmpl_wq)
		destroy_workqueue(rf->cqp_cmpl_wq);

	if (!rf->reset)
		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
			  ccq->mem_cq.pa);
	ccq->mem_cq.va = NULL;
}
/**
 * irdma_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: iwarp device
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @privileged: permission to close HMC objects
 * @reset: true if called before reset
 */
static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
					 enum irdma_hmc_rsrc_type obj_type,
					 struct irdma_hmc_info *hmc_info,
					 bool privileged, bool reset)
{
	struct irdma_hmc_del_obj_info info = {};

	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.privileged = privileged;
	if (irdma_sc_del_hmc_obj(dev, &info, reset))
		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
			  obj_type);
}
/**
 * irdma_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @privileged: permission to delete HMC objects
 * @reset: true if called before reset
 * @vers: hardware version
 */
static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
				  struct irdma_hmc_info *hmc_info, bool privileged,
				  bool reset, enum irdma_vers vers)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     hmc_info, privileged, reset);
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}
}
/**
 * irdma_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
				     struct irdma_hmc_create_obj_info *info)
{
	return irdma_sc_create_hmc_obj(dev, info);
}
/**
 * irdma_create_hmc_objs - create all hmc objects for the device
 * @rf: RDMA PCI function
 * @privileged: permission to create HMC objects
 * @vers: HW version
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
				 enum irdma_vers vers)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_hmc_create_obj_info info = {};
	int i, status = 0;

	info.hmc_info = dev->hmc_info;
	info.privileged = privileged;
	info.entry_type = rf->sd_type;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
			continue;
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
			info.rsrc_type = iw_hmc_obj_types[i];
			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
			status = irdma_create_hmc_obj_type(dev, &info);
			if (status) {
				ibdev_dbg(to_ibdev(dev),
					  "ERR: create obj type %d status = %d\n",
					  iw_hmc_obj_types[i], status);
				break;
			}
		}
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}

	if (!status)
		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
							   true, true);

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     dev->hmc_info, privileged,
						     false);
	}

	return status;
}
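/*
 * The CQP host context, the CCQ shadow area and the FPM query/commit
 * buffers are all carved out of the single rf->obj_mem DMA allocation;
 * the helper below hands out aligned sub-regions by advancing
 * rf->obj_next through that region.
 */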
/**
 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
 * @rf: RDMA PCI function
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
				 struct irdma_dma_mem *memptr, u32 size,
				 u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)rf->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (unsigned long)mask + 1ULL);
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = rf->obj_next.pa + extra;
	memptr->size = size;
	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
		return -ENOMEM;

	rf->obj_next.va = (u8 *)memptr->va + size;
	rf->obj_next.pa = memptr->pa + size;

	return 0;
}
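/*
 * The control QP is created with a software SQ of IRDMA_CQP_SW_SQSIZE_2048
 * entries; one cqp_request and one scratch slot are allocated per SQ entry
 * so completions can later be matched back to their originating request.
 */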
/**
 * irdma_create_cqp - create control qp
 * @rf: RDMA PCI function
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_cqp(struct irdma_pci_f *rf)
{
	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
	struct irdma_dma_mem mem;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_cqp_init_info cqp_init_info = {};
	struct irdma_cqp *cqp = &rf->cqp;
	u16 maj_err, min_err;
	int i, status;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return -ENOMEM;

	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		status = -ENOMEM;
		goto err_scratch;
	}

	dev->cqp = &cqp->sc_cqp;
	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
			     IRDMA_CQP_ALIGNMENT);
	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
					&cqp->sq.pa, GFP_KERNEL);
	if (!cqp->sq.va) {
		status = -ENOMEM;
		goto err_sq;
	}

	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
				       IRDMA_HOST_CTX_ALIGNMENT_M);
	if (status)
		goto err_ctx;

	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = rf->rsrc_profile;
	cqp_init_info.scratch_array = cqp->scratch_array;
	cqp_init_info.protocol_used = rf->protocol_used;

	switch (rf->rdma_ver) {
	case IRDMA_GEN_1:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
		break;
	case IRDMA_GEN_2:
		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
		break;
	}
	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
		goto err_ctx;
	}

	spin_lock_init(&cqp->req_lock);
	spin_lock_init(&cqp->compl_lock);

	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		ibdev_dbg(to_ibdev(dev),
			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
			  status, maj_err, min_err);
		goto err_ctx;
	}

	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);

	/* init the waitqueue of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	init_waitqueue_head(&cqp->remove_wq);
	return 0;

err_ctx:
	dma_free_coherent(dev->hw->device, cqp->sq.size,
			  cqp->sq.va, cqp->sq.pa);
err_sq:
	kfree(cqp->scratch_array);
	cqp->scratch_array = NULL;
err_scratch:
	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;

	return status;
}
/**
 * irdma_create_ccq - create control cq
 * @rf: RDMA PCI function
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq_init_info info = {};
	struct irdma_ccq *ccq = &rf->ccq;
	int status;

	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
				 IRDMA_CQ0_ALIGNMENT);
	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
					    &ccq->mem_cq.pa, GFP_KERNEL);
	if (!ccq->mem_cq.va)
		return -ENOMEM;

	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
				       ccq->shadow_area.size,
				       IRDMA_SHADOWAREA_M);
	if (status)
		goto exit;

	ccq->sc_cq.back_cq = ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = ccq->shadow_area.va;
	info.shadow_area_pa = ccq->shadow_area.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	info.vsi = &rf->default_vsi;
	status = irdma_sc_ccq_init(dev->ccq, &info);
	if (!status)
		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
exit:
	if (status) {
		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
				  ccq->mem_cq.va, ccq->mem_cq.pa);
		ccq->mem_cq.va = NULL;
	}

	return status;
}
/**
 * irdma_alloc_set_mac - set up a mac address table entry
 * @iwdev: irdma device
 *
 * Allocate a mac ip entry and add it to the hw table. Return 0
 * if successful, otherwise return error
 */
static int irdma_alloc_set_mac(struct irdma_device *iwdev)
{
	int status;

	status = irdma_alloc_local_mac_entry(iwdev->rf,
					     &iwdev->mac_ip_table_idx);
	if (!status) {
		status = irdma_add_local_mac_entry(iwdev->rf,
						   (const u8 *)iwdev->netdev->dev_addr,
						   (u8)iwdev->mac_ip_table_idx);
		if (status)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}
/**
 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
 * ceq
 * @rf: RDMA PCI function
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
				u32 ceq_id, struct irdma_msix_vector *msix_vec)
{
	int status;

	if (rf->msix_shared && !ceq_id) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				     msix_vec->name, rf);
	} else {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-CEQ-%d",
			 dev_name(&rf->pcidev->dev), ceq_id);
		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);

		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
				     msix_vec->name, iwceq);
	}
	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
		return status;
	}

	msix_vec->ceq_id = ceq_id;
	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);

	return 0;
}
/**
 * irdma_cfg_aeq_vector - set up the msix vector for aeq
 * @rf: RDMA PCI function
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
	u32 ret = 0;

	if (!rf->msix_shared) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				  msix_vec->name, rf);
	}
	if (ret) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
		return -EINVAL;
	}

	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);

	return 0;
}
/**
 * irdma_create_ceq - create completion event queue
 * @rf: RDMA PCI function
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 * @vsi: SC vsi struct
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
			    u32 ceq_id, struct irdma_sc_vsi *vsi)
{
	int status;
	struct irdma_ceq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 ceq_size;

	info.ceq_id = ceq_id;
	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
		       dev->hw_attrs.max_hw_ceq_size);
	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
				IRDMA_CEQ_ALIGNMENT);
	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
					   &iwceq->mem.pa, GFP_KERNEL);
	if (!iwceq->mem.va)
		return -ENOMEM;

	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;
	info.elem_cnt = ceq_size;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	info.vsi = vsi;
	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
	if (!status) {
		if (dev->ceq_valid)
			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
						   IRDMA_OP_CEQ_CREATE);
		else
			status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
	}

	if (status) {
		dma_free_coherent(dev->hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
		iwceq->mem.va = NULL;
	}

	return status;
}
/**
 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
 * @rf: RDMA PCI function
 *
 * Allocate a list for all device completion event queues
 * Create the ceq 0 and configure its msix interrupt vector
 * Return 0, if successfully set up, otherwise return error
 */
static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	u32 i;
	int status = 0;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
	if (!rf->ceqlist) {
		status = -ENOMEM;
		goto exit;
	}

	iwceq = &rf->ceqlist[0];
	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
	if (status) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
			  status);
		goto exit;
	}

	spin_lock_init(&iwceq->ce_lock);
	i = rf->msix_shared ? 0 : 1;
	msix_vec = &rf->iw_msixtbl[i];
	iwceq->irq = msix_vec->irq;
	iwceq->msix_idx = msix_vec->idx;
	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
	if (status) {
		irdma_destroy_ceq(rf, iwceq);
		goto exit;
	}

	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
	rf->ceqs_count++;

exit:
	if (status && !rf->ceqs_count) {
		kfree(rf->ceqlist);
		rf->ceqlist = NULL;
		return status;
	}
	rf->sc_dev.ceq_valid = true;

	return 0;
}
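/*
 * CEQ 0 is handled by irdma_setup_ceq_0() above; the remaining CEQs start
 * at index 1, and their MSI-X table entries start at index 1 or 2
 * depending on whether vector 0 is shared with the AEQ (rf->msix_shared).
 */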
/**
 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources
 * @rf: RDMA PCI function
 * @vsi: VSI structure for this CEQ
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if ceqs are successfully set up, otherwise return error
 */
static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
	u32 i;
	u32 ceq_id;
	struct irdma_ceq *iwceq;
	struct irdma_msix_vector *msix_vec;
	int status;
	u32 num_ceqs;

	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
	i = (rf->msix_shared) ? 1 : 2;
	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
		iwceq = &rf->ceqlist[ceq_id];
		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
		if (status) {
			ibdev_dbg(&rf->iwdev->ibdev,
				  "ERR: create ceq status = %d\n", status);
			goto del_ceqs;
		}
		spin_lock_init(&iwceq->ce_lock);
		msix_vec = &rf->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
		if (status) {
			irdma_destroy_ceq(rf, iwceq);
			goto del_ceqs;
		}
		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
		rf->ceqs_count++;
	}

	return 0;

del_ceqs:
	irdma_del_ceqs(rf);

	return status;
}
static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
{
	struct irdma_aeq *aeq = &rf->aeq;
	dma_addr_t *pg_arr;
	u32 pg_cnt;
	int status;

	if (rf->rdma_ver < IRDMA_GEN_2)
		return -EOPNOTSUPP;

	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
	aeq->mem.va = vzalloc(aeq->mem.size);
	if (!aeq->mem.va)
		return -ENOMEM;

	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
	if (status) {
		vfree(aeq->mem.va);
		return status;
	}

	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
	if (status) {
		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
		vfree(aeq->mem.va);
		return status;
	}

	return 0;
}
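/*
 * The AEQ is sized from the committed QP and CQ HMC object counts (the QP
 * count is doubled in iWARP-only mode). A physically contiguous DMA buffer
 * is tried first with __GFP_NOWARN; if that allocation fails, the queue
 * falls back to the virtually mapped AEQ built by irdma_create_virt_aeq().
 */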
/**
 * irdma_create_aeq - create async event queue
 * @rf: RDMA PCI function
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static int irdma_create_aeq(struct irdma_pci_f *rf)
{
	struct irdma_aeq_init_info info = {};
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_aeq *aeq = &rf->aeq;
	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
	u32 aeq_size;
	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
	int status;

	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
		   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);

	aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
			      IRDMA_AEQ_ALIGNMENT);
	aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
					 &aeq->mem.pa,
					 GFP_KERNEL | __GFP_NOWARN);
	if (aeq->mem.va)
		goto skip_virt_aeq;

	/* physically mapped aeq failed. setup virtual aeq */
	status = irdma_create_virt_aeq(rf, aeq_size);
	if (status)
		return status;

	info.virtual_map = true;
	aeq->virtual_map = info.virtual_map;
	info.pbl_chunk_size = 1;
	info.first_pm_pbl_idx = aeq->palloc.level1.idx;

skip_virt_aeq:
	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	info.msix_idx = rf->iw_msixtbl->idx;
	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto err;

	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
	if (status)
		goto err;

	return 0;

err:
	if (aeq->virtual_map) {
		irdma_destroy_virt_aeq(rf);
	} else {
		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
				  aeq->mem.pa);
		aeq->mem.va = NULL;
	}

	return status;
}
/**
 * irdma_setup_aeq - set up the device aeq
 * @rf: RDMA PCI function
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static int irdma_setup_aeq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	status = irdma_create_aeq(rf);
	if (status)
		return status;

	status = irdma_cfg_aeq_vector(rf);
	if (status) {
		irdma_destroy_aeq(rf);
		return status;
	}

	if (!rf->msix_shared)
		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);

	return 0;
}
/**
 * irdma_initialize_ilq - create iwarp local queue for cm
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int irdma_initialize_ilq(struct irdma_device *iwdev)
{
	struct irdma_puda_rsrc_info info = {};
	int status;

	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
	info.abi_ver = IRDMA_ABI_VER;
	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
	info.rq_size = info.sq_size;
	info.buf_size = 1024;
	info.tx_buf_cnt = 2 * info.sq_size;
	info.receive = irdma_receive_ilq;
	info.xmit_complete = irdma_free_sqbuf;
	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");

	return status;
}
/**
 * irdma_initialize_ieq - create iwarp exception queue
 * @iwdev: irdma device
 *
 * Return 0 if successful, otherwise return error
 */
static int irdma_initialize_ieq(struct irdma_device *iwdev)
{
	struct irdma_puda_rsrc_info info = {};
	int status;

	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
	info.qp_id = iwdev->vsi.exception_lan_q;
	info.abi_ver = IRDMA_ABI_VER;
	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
	info.rq_size = info.sq_size;
	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
	info.tx_buf_cnt = 4096;
	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");

	return status;
}
/**
 * irdma_reinitialize_ieq - destroy and re-create ieq
 * @vsi: VSI structure
 */
void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
{
	struct irdma_device *iwdev = vsi->back_vsi;
	struct irdma_pci_f *rf = iwdev->rf;

	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
	if (irdma_initialize_ieq(iwdev)) {
		iwdev->rf->reset = true;
		rf->gen_ops.request_reset(rf);
	}
}
/**
 * irdma_hmc_setup - create hmc objects for the device
 * @rf: RDMA PCI function
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static int irdma_hmc_setup(struct irdma_pci_f *rf)
{
	int status;
	u32 qpcnt;

	qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;

	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
	status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
	if (status)
		return status;

	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);

	return status;
}
/**
 * irdma_del_init_mem - deallocate memory resources
 * @rf: RDMA PCI function
 */
static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;

	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	vfree(rf->mem_rsrc);
	rf->mem_rsrc = NULL;
	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
			  rf->obj_mem.pa);
	rf->obj_mem.va = NULL;
	if (rf->rdma_ver != IRDMA_GEN_1) {
		bitmap_free(rf->allocated_ws_nodes);
		rf->allocated_ws_nodes = NULL;
	}
	kfree(rf->iw_msixtbl);
	rf->iw_msixtbl = NULL;
	kfree(rf->hmc_info_mem);
	rf->hmc_info_mem = NULL;
}
/**
 * irdma_initialize_dev - initialize device
 * @rf: RDMA PCI function
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static int irdma_initialize_dev(struct irdma_pci_f *rf)
{
	int status;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_device_init_info info = {};
	struct irdma_dma_mem mem;
	u32 size;

	size = sizeof(struct irdma_hmc_pble_rsrc) +
	       sizeof(struct irdma_hmc_info) +
	       (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);

	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!rf->hmc_info_mem)
		return -ENOMEM;

	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
	dev->hmc_info = &rf->hw.hmc;
	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
				 (rf->pble_rsrc + 1);

	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
	if (status)
		goto error;

	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;

	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
	if (status)
		goto error;

	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;

	info.bar0 = rf->hw.hw_addr;
	info.hmc_fn_id = rf->pf_id;
	status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
	if (status)
		goto error;

	return status;
error:
	kfree(rf->hmc_info_mem);
	rf->hmc_info_mem = NULL;

	return status;
}
/**
 * irdma_rt_deinit_hw - clean up the irdma device resources
 * @iwdev: irdma device
 *
 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
 * device queues and free the pble and the hmc objects
 */
void irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);

	switch (iwdev->init_state) {
	case IP_ADDR_REGISTERED:
		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
		fallthrough;
	case AEQ_CREATED:
	case PBLE_CHUNK_MEM:
	case CEQS_CREATED:
	case IEQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
					     iwdev->rf->reset);
		fallthrough;
	case ILQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi,
					     IRDMA_PUDA_RSRC_TYPE_ILQ,
					     iwdev->rf->reset);
		break;
	default:
		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
		break;
	}

	irdma_cleanup_cm_core(&iwdev->cm_core);
	if (iwdev->vsi.pestat) {
		irdma_vsi_stats_free(&iwdev->vsi);
		kfree(iwdev->vsi.pestat);
	}
	if (iwdev->cleanup_wq)
		destroy_workqueue(iwdev->cleanup_wq);
}
static int irdma_setup_init_state(struct irdma_pci_f *rf)
{
	int status;

	status = irdma_save_msix_info(rf);
	if (status)
		return status;

	rf->hw.device = &rf->pcidev->dev;
	rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
					    &rf->obj_mem.pa, GFP_KERNEL);
	if (!rf->obj_mem.va) {
		status = -ENOMEM;
		goto clean_msixtbl;
	}

	rf->obj_next = rf->obj_mem;
	status = irdma_initialize_dev(rf);
	if (status)
		goto clean_obj_mem;

	return 0;

clean_obj_mem:
	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
			  rf->obj_mem.pa);
	rf->obj_mem.va = NULL;
clean_msixtbl:
	kfree(rf->iw_msixtbl);
	rf->iw_msixtbl = NULL;

	return status;
}
/**
 * irdma_get_used_rsrc - determine resources used internally
 * @iwdev: irdma device
 *
 * Called at the end of open to get all internal allocations
 */
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
						  iwdev->rf->max_pd);
	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
						  iwdev->rf->max_qp);
	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
						  iwdev->rf->max_cq);
	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
						  iwdev->rf->max_mr);
}
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
{
	enum init_completion_state state = rf->init_state;

	rf->init_state = INVALID_STATE;
	if (rf->rsrc_created) {
		irdma_destroy_aeq(rf);
		irdma_destroy_pble_prm(rf->pble_rsrc);
		irdma_del_ceqs(rf);
		rf->rsrc_created = false;
	}
	switch (state) {
	case CEQ0_CREATED:
		irdma_del_ceq_0(rf);
		fallthrough;
	case CCQ_CREATED:
		irdma_destroy_ccq(rf);
		fallthrough;
	case HW_RSRC_INITIALIZED:
	case HMC_OBJS_CREATED:
		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
				      rf->reset, rf->rdma_ver);
		fallthrough;
	case CQP_CREATED:
		irdma_destroy_cqp(rf);
		fallthrough;
	case INITIAL_STATE:
		irdma_del_init_mem(rf);
		break;
	case INVALID_STATE:
	default:
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
		break;
	}
}
/**
 * irdma_rt_init_hw - Initializes runtime portion of HW
 * @iwdev: irdma device
 * @l2params: qos, tc, mtu info from netdev driver
 *
 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
 * device resource objects.
 */
int irdma_rt_init_hw(struct irdma_device *iwdev,
		     struct irdma_l2params *l2params)
{
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_vsi_init_info vsi_info = {};
	struct irdma_vsi_stats_info stats_info = {};
	int status;

	vsi_info.back_vsi = iwdev;
	vsi_info.params = l2params;
	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
	vsi_info.register_qset = rf->gen_ops.register_qset;
	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
	vsi_info.exception_lan_q = 2;
	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);

	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
	if (status)
		return status;

	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
	if (!stats_info.pestat) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		return -ENOMEM;
	}
	stats_info.fcn_id = dev->hmc_fn_id;
	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
	if (status) {
		irdma_cleanup_cm_core(&iwdev->cm_core);
		kfree(stats_info.pestat);
		return status;
	}

	do {
		if (!iwdev->roce_mode) {
			status = irdma_initialize_ilq(iwdev);
			if (status)
				break;
			iwdev->init_state = ILQ_CREATED;
			status = irdma_initialize_ieq(iwdev);
			if (status)
				break;
			iwdev->init_state = IEQ_CREATED;
		}
		if (!rf->rsrc_created) {
			status = irdma_setup_ceqs(rf, &iwdev->vsi);
			if (status)
				break;

			iwdev->init_state = CEQS_CREATED;

			status = irdma_hmc_init_pble(&rf->sc_dev,
						     rf->pble_rsrc);
			if (status)
				break;

			iwdev->init_state = PBLE_CHUNK_MEM;

			status = irdma_setup_aeq(rf);
			if (status) {
				irdma_destroy_pble_prm(rf->pble_rsrc);
				break;
			}
			iwdev->init_state = AEQ_CREATED;
			rf->rsrc_created = true;
		}

		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			irdma_alloc_set_mac(iwdev);
		irdma_add_ip(iwdev);
		iwdev->init_state = IP_ADDR_REGISTERED;

		/* handles async cleanup tasks - disconnect CM, free qp,
		 * free cq bufs
		 */
		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
						    WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
		if (!iwdev->cleanup_wq)
			return -ENOMEM;
		irdma_get_used_rsrc(iwdev);
		init_waitqueue_head(&iwdev->suspend_wq);

		return 0;
	} while (0);

	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
		status, iwdev->init_state);
	irdma_rt_deinit_hw(iwdev);

	return status;
}
/**
 * irdma_ctrl_init_hw - Initializes control portion of HW
 * @rf: RDMA PCI function
 *
 * Create admin queues, HMC objects and RF resource objects
 */
int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	do {
		status = irdma_setup_init_state(rf);
		if (status)
			break;
		rf->init_state = INITIAL_STATE;

		status = irdma_create_cqp(rf);
		if (status)
			break;
		rf->init_state = CQP_CREATED;

		status = irdma_hmc_setup(rf);
		if (status)
			break;
		rf->init_state = HMC_OBJS_CREATED;

		status = irdma_initialize_hw_rsrc(rf);
		if (status)
			break;
		rf->init_state = HW_RSRC_INITIALIZED;

		status = irdma_create_ccq(rf);
		if (status)
			break;
		rf->init_state = CCQ_CREATED;

		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
		if (rf->rdma_ver != IRDMA_GEN_1) {
			status = irdma_get_rdma_features(dev);
			if (status)
				break;
		}

		status = irdma_setup_ceq_0(rf);
		if (status)
			break;
		rf->init_state = CEQ0_CREATED;
		/* Handles processing of CQP completions */
		rf->cqp_cmpl_wq =
			alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
		if (!rf->cqp_cmpl_wq) {
			status = -ENOMEM;
			break;
		}
		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
		irdma_sc_ccq_arm(dev->ccq);
		return 0;
	} while (0);

	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
		rf->init_state, status);
	irdma_ctrl_deinit_hw(rf);

	return status;
}
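/*
 * rf->mem_rsrc is a single vzalloc'd region sized by
 * irdma_calc_mem_rsrc_size() and carved, in order, into the ARP table,
 * the QP/CQ/MR/PD/AH/MCG/ARP allocation bitmaps and the QP and CQ
 * pointer tables below.
 */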
/**
 * irdma_set_hw_rsrc - set hw memory resources.
 * @rf: RDMA PCI function
 */
static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
	rf->allocated_qps = (void *)(rf->mem_rsrc +
				     (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
	rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
	rf->qp_table = (struct irdma_qp **)
		       (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
	rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);

	spin_lock_init(&rf->rsrc_lock);
	spin_lock_init(&rf->arp_lock);
	spin_lock_init(&rf->qptable_lock);
	spin_lock_init(&rf->cqtable_lock);
	spin_lock_init(&rf->qh_list_lock);
}
/**
 * irdma_calc_mem_rsrc_size - calculate memory resources size.
 * @rf: RDMA PCI function
 */
static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
{
	u32 rsrc_size;

	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
	rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;

	return rsrc_size;
}
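/*
 * Resource limits below are derived from the HMC object counts committed
 * for this function. Index 0 of each allocation bitmap is reserved, and
 * QP/CQ/PD slots 1 and 2 are pre-set for the privileged ILQ and IEQ queues.
 */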
/**
 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
 * @rf: RDMA PCI function
 */
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
{
	u32 rsrc_size;
	u32 mrdrvbits;
	u32 ret;

	if (rf->rdma_ver != IRDMA_GEN_1) {
		rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
						       GFP_KERNEL);
		if (!rf->allocated_ws_nodes)
			return -ENOMEM;

		set_bit(0, rf->allocated_ws_nodes);
		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
	}
	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
	rf->max_mcg = rf->max_qp;

	rsrc_size = irdma_calc_mem_rsrc_size(rf);
	rf->mem_rsrc = vzalloc(rsrc_size);
	if (!rf->mem_rsrc) {
		ret = -ENOMEM;
		goto mem_rsrc_vzalloc_fail;
	}

	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;

	irdma_set_hw_rsrc(rf);

	set_bit(0, rf->allocated_mrs);
	set_bit(0, rf->allocated_qps);
	set_bit(0, rf->allocated_cqs);
	set_bit(0, rf->allocated_pds);
	set_bit(0, rf->allocated_arps);
	set_bit(0, rf->allocated_ahs);
	set_bit(0, rf->allocated_mcgs);
	set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
	set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
	set_bit(1, rf->allocated_cqs);
	set_bit(1, rf->allocated_pds);
	set_bit(2, rf->allocated_cqs);
	set_bit(2, rf->allocated_pds);

	INIT_LIST_HEAD(&rf->mc_qht_list.list);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));

	return 0;

mem_rsrc_vzalloc_fail:
	bitmap_free(rf->allocated_ws_nodes);
	rf->allocated_ws_nodes = NULL;

	return ret;
}
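/*
 * Each CCQ completion carries the originating cqp_request in its scratch
 * field. Waiting (synchronous) requests are woken through their waitqueue,
 * asynchronous ones run their callback; both paths drop the request
 * reference via irdma_put_cqp_request().
 */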
/**
 * irdma_cqp_ce_handler - handle cqp completions
 * @rf: RDMA PCI function
 * @cq: cq for cqp completions
 */
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
{
	struct irdma_cqp_request *cqp_request;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	u32 cqe_count = 0;
	struct irdma_ccq_cqe_info info;
	unsigned long flags;
	int ret;

	do {
		memset(&info, 0, sizeof(info));
		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
		if (ret)
			break;

		cqp_request = (struct irdma_cqp_request *)
			      (unsigned long)info.scratch;
		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
						     info.maj_err_code,
						     info.min_err_code))
			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				  info.op_code, info.maj_err_code, info.min_err_code);

		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;

			if (cqp_request->waiting) {
				WRITE_ONCE(cqp_request->request_done, true);
				wake_up(&cqp_request->waitq);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request);
				irdma_put_cqp_request(&rf->cqp, cqp_request);
			}
		}

		cqe_count++;
	} while (1);

	if (cqe_count) {
		irdma_process_bh(dev);
		irdma_sc_ccq_arm(cq);
	}
}

/**
 * cqp_compl_worker - Handle cqp completions
 * @work: Pointer to work structure
 */
void cqp_compl_worker(struct work_struct *work)
{
	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
					      cqp_cmpl_work);
	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;

	irdma_cqp_ce_handler(rf, cq);
}
/**
 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
 * @cm_core: cm's core
 * @port: port to identify apbvt entry
 */
static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
							  u16 port)
{
	struct irdma_apbvt_entry *entry;

	hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
		if (entry->port == port) {
			entry->use_cnt++;
			return entry;
		}
	}

	return NULL;
}
/**
 * irdma_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: del hash
 * @term: term message
 * @termlen: length of term message
 */
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
			 u8 termlen)
{
	struct irdma_modify_qp_info info = {};

	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.dont_send_term = true;
	info.dont_send_fin = true;
	info.termlen = termlen;

	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
		info.dont_send_term = false;
	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
		info.dont_send_fin = false;
	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
		info.reset_tcp_conn = true;
	iwqp->hw_iwarp_state = state;
	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
	iwqp->iwarp_state = info.next_iwarp_state;
}
/**
 * irdma_del_local_mac_entry - remove a mac entry from the hw
 * table
 * @rf: RDMA PCI function
 * @idx: the index of the mac ip address to delete
 */
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
{
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);
}
/**
 * irdma_add_local_mac_entry - add a mac ip address entry to the
 * hw table
 * @rf: RDMA PCI function
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{
	struct irdma_local_mac_entry_info *info;
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}
/**
 * irdma_alloc_local_mac_entry - allocate a mac entry
 * @rf: RDMA PCI function
 * @mac_tbl_idx: the index of the new mac address
 *
 * Allocate a mac address entry and update the mac_tbl_idx
 * to hold the index of the newly created mac address.
 * Return 0 if successful, otherwise return error.
 */
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
{
	struct irdma_cqp *iwcqp = &rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (!status)
		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;

	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}
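/*
 * Usage sketch (illustrative only): a caller programming a new MAC address
 * would normally reserve a table slot first and then populate it. The mac
 * pointer below is a placeholder, not a symbol from this file:
 *
 *	u16 mac_idx;
 *	int err;
 *
 *	err = irdma_alloc_local_mac_entry(rf, &mac_idx);
 *	if (err)
 *		return err;
 *
 *	err = irdma_add_local_mac_entry(rf, mac, mac_idx);
 *	if (err)
 *		irdma_del_local_mac_entry(rf, mac_idx);
 */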
/**
 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
 * @iwdev: irdma device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
				      u16 accel_local_port, bool add_port)
{
	struct irdma_apbvt_info *info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;
	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = accel_local_port;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
		  (!add_port) ? "DELETE" : "ADD", accel_local_port);

	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

	return status;
}
/**
 * irdma_add_apbvt - add tcp port to HW apbvt table
 * @iwdev: irdma device
 * @port: port for apbvt
 */
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	struct irdma_apbvt_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	entry = irdma_lookup_apbvt_entry(cm_core, port);
	if (entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return entry;
	}

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return NULL;
	}

	entry->use_cnt = 1;
	entry->port = port;
	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);

	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
		kfree(entry);
		return NULL;
	}

	return entry;
}
/**
 * irdma_del_apbvt - delete tcp port from HW apbvt table
 * @iwdev: irdma device
 * @entry: apbvt entry object
 */
void irdma_del_apbvt(struct irdma_device *iwdev,
		     struct irdma_apbvt_entry *entry)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
	if (--entry->use_cnt) {
		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
		return;
	}

	hash_del(&entry->hlist);
	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
	 * protect against race where add APBVT CQP can race ahead of the delete
	 * APBVT for same port.
	 */
	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
	kfree(entry);
	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}
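/*
 * Usage sketch (illustrative only, not taken from this file): the CM is
 * expected to pair these helpers around the lifetime of a listening or
 * connected TCP port:
 *
 *	struct irdma_apbvt_entry *apbvt;
 *
 *	apbvt = irdma_add_apbvt(iwdev, loc_port);
 *	if (!apbvt)
 *		return -ENOMEM;
 *	...
 *	irdma_del_apbvt(iwdev, apbvt);
 *
 * Repeated adds for the same port share one entry via use_cnt, so every
 * successful irdma_add_apbvt() needs a matching irdma_del_apbvt().
 */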
/**
 * irdma_manage_arp_cache - manage hw arp cache
 * @rf: RDMA PCI function
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4
 * @action: add, delete or modify
 */
void irdma_manage_arp_cache(struct irdma_pci_f *rf,
			    const unsigned char *mac_addr,
			    u32 *ip_addr, bool ipv4, u32 action)
{
	struct irdma_add_arp_cache_entry_info *info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	int arp_index;

	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
	if (arp_index == -1)
		return;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == IRDMA_ARP_ADD) {
		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = (u16)arp_index;
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch =
			(uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->post_sq = 1;
	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
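/*
 * Usage sketch (illustrative only): a caller reacting to a neighbour update
 * would typically resolve/insert the host entry and push it to hardware in
 * one call, e.g. for an IPv4 neighbour:
 *
 *	u32 ip[4] = {};
 *
 *	ip[0] = ntohl(daddr);
 *	irdma_manage_arp_cache(rf, neigh_mac, ip, true, IRDMA_ARP_ADD);
 *
 * and mirror it with the driver's ARP delete action when the neighbour goes
 * away. The daddr/neigh_mac names above are placeholders, not symbols from
 * this file.
 */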
/**
 * irdma_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 */
static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_cm_node *cm_node = cqp_request->param;

	irdma_send_syn(cm_node, 1);
	irdma_rem_ref_cm_node(cm_node);
}
/**
 * irdma_manage_qhash - add or modify qhash
 * @iwdev: irdma device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 */
int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
		       enum irdma_quad_entry_type etype,
		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
		       bool wait)
{
	struct irdma_qhash_table_info *info;
	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_cm_node *cm_node = cmnode;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));
	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id < VLAN_N_VID) {
		info->vlan_valid = true;
		info->vlan_id = cminfo->vlan_id;
	} else {
		info->vlan_valid = false;
	}
	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cminfo->qh_qpid;
	info->dest_port = cminfo->loc_port;
	info->dest_ip[0] = cminfo->loc_addr[0];
	info->dest_ip[1] = cminfo->loc_addr[1];
	info->dest_ip[2] = cminfo->loc_addr[2];
	info->dest_ip[3] = cminfo->loc_addr[3];
	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
		info->src_port = cminfo->rem_port;
		info->src_ip[0] = cminfo->rem_addr[0];
		info->src_ip[1] = cminfo->rem_addr[1];
		info->src_ip[2] = cminfo->rem_addr[2];
		info->src_ip[3] = cminfo->rem_addr[3];
	}
	if (cmnode) {
		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
		cqp_request->param = cmnode;
		if (!wait)
			refcount_inc(&cm_node->refcnt);
	}
	if (info->ipv4_valid)
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id,
			  cmnode ? cmnode : NULL);
	else
		ibdev_dbg(&iwdev->ibdev,
			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
			  (!mtype) ? "DELETE" : "ADD",
			  __builtin_return_address(0), info->dest_port,
			  info->src_port, info->dest_ip, info->src_ip,
			  info->mac_addr, cminfo->vlan_id,
			  cmnode ? cmnode : NULL);

	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	if (status && cm_node && !wait)
		irdma_rem_ref_cm_node(cm_node);

	irdma_put_cqp_request(iwcqp, cqp_request);

	return status;
}
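/*
 * Usage sketch (illustrative only): a listener side typically installs a
 * SYN (3-tuple) entry keyed on the local address/port of a populated
 * struct irdma_cm_info, and removes it the same way when the listen ends:
 *
 *	err = irdma_manage_qhash(iwdev, &cm_info, IRDMA_QHASH_TYPE_TCP_SYN,
 *				 IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true);
 *	...
 *	err = irdma_manage_qhash(iwdev, &cm_info, IRDMA_QHASH_TYPE_TCP_SYN,
 *				 IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
 *
 * The enum names above follow the add/delete pattern of the qhash types
 * listed in this function and are assumptions here; passing a non-NULL
 * cmnode arms the irdma_send_syn_cqp_callback() completion shown above.
 */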
/**
 * irdma_hw_flush_wqes_callback - Check return code after flush
 * @cqp_request: qp flush cqp completion
 */
static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
{
	struct irdma_qp_flush_info *hw_info;
	struct irdma_sc_qp *qp;
	struct irdma_qp *iwqp;
	struct cqp_cmds_info *cqp_info;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
	qp = cqp_info->in.u.qp_flush_wqes.qp;
	iwqp = qp->qp_uk.back_qp;

	if (cqp_request->compl_info.maj_err_code)
		return;

	if (hw_info->rq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		/* RQ WQE flush was requested but did not happen */
		qp->qp_uk.rq_flush_complete = true;
	}
	if (hw_info->sq &&
	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
	     cqp_request->compl_info.min_err_code == 0)) {
		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
				  qp->qp_uk.qp_id);
			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
		}
		qp->qp_uk.sq_flush_complete = true;
	}
}
/**
 * irdma_hw_flush_wqes - flush qp's wqe
 * @rf: RDMA PCI function
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
			struct irdma_qp_flush_info *info, bool wait)
{
	int status;
	struct irdma_qp_flush_info *hw_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_qp *iwqp = qp->qp_uk.back_qp;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	if (!wait)
		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));
	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	if (status) {
		qp->qp_uk.sq_flush_complete = true;
		qp->qp_uk.rq_flush_complete = true;
		irdma_put_cqp_request(&rf->cqp, cqp_request);
		return status;
	}

	if (!wait || cqp_request->compl_info.maj_err_code)
		goto put_cqp;

	if (info->rq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/* RQ WQE flush was requested but did not happen */
			qp->qp_uk.rq_flush_complete = true;
		}
	}
	if (info->sq) {
		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
		    cqp_request->compl_info.min_err_code == 0) {
			/*
			 * Handling case where WQE is posted to empty SQ when
			 * flush has not completed
			 */
			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
				struct irdma_cqp_request *new_req;

				if (!qp->qp_uk.sq_flush_complete)
					goto put_cqp;
				qp->qp_uk.sq_flush_complete = false;
				qp->flush_sq = false;

				info->rq = false;
				info->sq = true;
				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
				if (!new_req) {
					status = -ENOMEM;
					goto put_cqp;
				}
				cqp_info = &new_req->info;
				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
				memcpy(hw_info, info, sizeof(*hw_info));
				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
				cqp_info->post_sq = 1;
				cqp_info->in.u.qp_flush_wqes.qp = qp;
				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;

				status = irdma_handle_cqp_op(rf, new_req);
				if (new_req->compl_info.maj_err_code ||
				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
				    status) {
					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
						  iwqp->ibqp.qp_num);
					qp->qp_uk.sq_flush_complete = false;
					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
				}
				irdma_put_cqp_request(&rf->cqp, new_req);
			} else {
				/* SQ WQE flush was requested but did not happen */
				qp->qp_uk.sq_flush_complete = true;
			}
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
				qp->qp_uk.sq_flush_complete = true;
		}
	}

	ibdev_dbg(&rf->iwdev->ibdev,
		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
		  cqp_request->compl_info.maj_err_code,
		  cqp_request->compl_info.min_err_code);
put_cqp:
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}
/**
 * irdma_gen_ae - generate AE
 * @rf: RDMA PCI function
 * @qp: qp associated with AE
 * @info: info for ae
 * @wait: wait for completion
 */
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
		  struct irdma_gen_ae_info *info, bool wait)
{
	struct irdma_gen_ae_info *ae_info;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	ae_info = &cqp_request->info.in.u.gen_ae.info;
	memcpy(ae_info, info, sizeof(*ae_info));
	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.gen_ae.qp = qp;
	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
}
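/*
 * Usage sketch (illustrative only): error paths that need the QP moved to
 * error by hardware can inject a fatal asynchronous event, e.g.:
 *
 *	struct irdma_gen_ae_info ae_info = {};
 *
 *	ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
 *	ae_info.ae_src = IRDMA_AE_SOURCE_RQ;
 *	irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
 *
 * The ae_code/ae_src values above mirror the pattern used by the driver's
 * own callers and are assumptions here, not taken from this file.
 */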
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
{
	struct irdma_qp_flush_info info = {};
	struct irdma_pci_f *rf = iwqp->iwdev->rf;
	u8 flush_code = iwqp->sc_qp.flush_code;

	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
		return;

	/* Set flush info fields */
	info.sq = flush_mask & IRDMA_FLUSH_SQ;
	info.rq = flush_mask & IRDMA_FLUSH_RQ;

	/* Generate userflush errors in CQE */
	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.sq_minor_code = FLUSH_GENERAL_ERR;
	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
	info.rq_minor_code = FLUSH_GENERAL_ERR;
	info.userflushcode = true;

	if (flush_mask & IRDMA_REFLUSH) {
		if (info.sq)
			iwqp->sc_qp.flush_sq = false;
		if (info.rq)
			iwqp->sc_qp.flush_rq = false;
	} else {
		if (info.sq && iwqp->sc_qp.sq_flush_code)
			info.sq_minor_code = flush_code;
		if (info.rq && iwqp->sc_qp.rq_flush_code)
			info.rq_minor_code = flush_code;
		if (!iwqp->user_mode)
			queue_delayed_work(iwqp->iwdev->cleanup_wq,
					   &iwqp->dwork_flush,
					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}

	/* Issue flush */
	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
				  flush_mask & IRDMA_FLUSH_WAIT);
	iwqp->flush_issued = true;
}
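/*
 * Usage sketch (illustrative only): QP error handling (for example after an
 * AE that set sc_qp.flush_code) typically flushes both queues and waits for
 * the CQP completion:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			       IRDMA_FLUSH_WAIT);
 *
 * A later pass over an already-flushed QP can add IRDMA_REFLUSH to clear
 * the flush_sq/flush_rq markers instead of reprogramming the minor codes.
 */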