/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
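
/* Async completions from the FW are dispatched here: DESTROY_QP_DONE
 * releases the "real" icid that was taken when the QP was offloaded,
 * SRQ_EMPTY/SRQ_LIMIT hand the SRQ id to the affiliated event callback,
 * and any other event forwards the raw async handle to the upper layer.
 */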
static int
qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
                     u16 echo, union event_ring_data *data, u8 fw_return_code)
{
        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;

        if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
                u16 icid =
                    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);

                /* icid release in this async event can occur only if the icid
                 * was offloaded to the FW. In case it wasn't offloaded this is
                 * handled in qed_roce_sp_destroy_qp.
                 */
                qed_roce_free_real_icid(p_hwfn, icid);
        } else {
                if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
                    fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
                        u16 srq_id = (u16)data->rdma_data.async_handle.lo;

                        events.affiliated_event(events.context, fw_event_code,
                                                &srq_id);
                } else {
                        union rdma_eqe_data rdata = data->rdma_data;

                        events.affiliated_event(events.context, fw_event_code,
                                                (void *)&rdata.async_handle);
                }
        }

        return 0;
}

void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
        int wait_count = 0;

        /* when destroying a RoCE QP the control is returned to the user after
         * the synchronous part. The asynchronous part may take a little longer.
         * We delay for a short while if an async destroy QP is still expected.
         * Beyond the added delay we clear the bitmap anyway.
         */
        while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
                msleep(100);
                if (wait_count++ > 20) {
                        DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
                        break;
                }
        }
        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}

static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
                               __le32 *dst_gid)
{
        int i;

        if (qp->roce_mode == ROCE_V2_IPV4) {
                /* The IPv4 addresses shall be aligned to the highest word.
                 * The lower words must be zero.
                 */
                memset(src_gid, 0, sizeof(union qed_gid));
                memset(dst_gid, 0, sizeof(union qed_gid));
                src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
                dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
        } else {
                /* GIDs and IPv6 addresses coincide in location and size */
                for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
                        src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
                        dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
                }
        }
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
        switch (roce_mode) {
        case ROCE_V1:
                return PLAIN_ROCE;
        case ROCE_V2_IPV4:
                return RROCE_IPV4;
        case ROCE_V2_IPV6:
                return RROCE_IPV6;
        default:
                return MAX_ROCE_FLAVOR;
        }
}

static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

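/* Each RoCE QP consumes a pair of adjacent CIDs: the even one backs the
 * responder (qp->icid) and the odd one backs the requester (qp->icid + 1).
 * qed_roce_alloc_cid() below allocates the pair and returns the even CID;
 * qed_roce_free_cid_pair() above releases both halves together.
 */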
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 responder_icid;
        u32 requester_icid;
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &responder_icid);
        if (rc) {
                spin_unlock_bh(&p_rdma_info->lock);
                return rc;
        }

        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
                                    &requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        if (rc)
                goto err;

        /* the two icid's should be adjacent */
        if ((requester_icid - responder_icid) != 1) {
                DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
                rc = -EINVAL;
                goto err;
        }

        responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);
        requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
                                                      p_rdma_info->proto);

        /* If these icids require a new ILT line allocate DMA-able context for
         * an ILT page
         */
        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
        if (rc)
                goto err;

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
        if (rc)
                goto err;

        *cid = (u16)responder_icid;
        return rc;

err:
        spin_lock_bh(&p_rdma_info->lock);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
        qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

        spin_unlock_bh(&p_rdma_info->lock);
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "Allocate CID - failed, rc = %d\n", rc);
        return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        u8 pri, tc = 0;

        if (qp->vlan_id) {
                pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u tc: %u (vlan priority %s)\n",
                   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

        return tc;
}

static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_resp_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
        int rc;
        u8 tc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for IRQ */
        qp->irq_num_pages = 1;
        qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->irq_phys_addr, GFP_KERNEL);
        if (!qp->irq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

        p_ramrod->flags = 0;

        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);

        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->irq_num_pages = qp->irq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->rq_cq_id);

        tc = qed_roce_get_qp_tc(p_hwfn, qp);
        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
                   low_latency_queue - CM_TX_PQ_BASE);
        p_ramrod->regular_latency_phy_queue =
            cpu_to_le16(regular_latency_queue);
        p_ramrod->low_latency_phy_queue =
            cpu_to_le16(low_latency_queue);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = qp->udp_src_port;
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        qp->resp_offloaded = true;
        qp->cq_prod = 0;

        proto = p_hwfn->p_rdma_info->proto;
        qed_roce_set_real_cid(p_hwfn, qp->icid -
                              qed_cxt_get_proto_cid_start(p_hwfn, proto));

        return rc;

err:
        DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->irq, qp->irq_phys_addr);

        return rc;
}

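/* The requester side mirrors the responder: it runs on CID qp->icid + 1,
 * allocates an ORQ ring page instead of an IRQ page, and marks its own
 * entry in the real_cid_map once the create ramrod completes.
 */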
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp)
{
        struct roce_create_qp_req_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
        enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
        int rc;
        u8 tc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        /* Allocate DMA-able memory for ORQ */
        qp->orq_num_pages = 1;
        qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     RDMA_RING_PAGE_SIZE,
                                     &qp->orq_phys_addr, GFP_KERNEL);
        if (!qp->orq) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
                          rc);
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_RAMROD_CREATE_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_create_qp_req;

        p_ramrod->flags = 0;

        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);

        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->orq_num_pages = qp->orq_num_pages;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
        p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
        p_ramrod->pd = cpu_to_le16(qp->pd);
        p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
        DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
        p_ramrod->cq_cid =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

        tc = qed_roce_get_qp_tc(p_hwfn, qp);
        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
                   low_latency_queue - CM_TX_PQ_BASE);
        p_ramrod->regular_latency_phy_queue =
            cpu_to_le16(regular_latency_queue);
        p_ramrod->low_latency_phy_queue =
            cpu_to_le16(low_latency_queue);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

        p_ramrod->udp_src_port = qp->udp_src_port;
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        qp->req_offloaded = true;
        proto = p_hwfn->p_rdma_info->proto;
        qed_roce_set_real_cid(p_hwfn,
                              qp->icid + 1 -
                              qed_cxt_get_proto_cid_start(p_hwfn, proto));

        return rc;

err:
        DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->orq, qp->orq_phys_addr);
        return rc;
}

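/* The two modify helpers below only ask the FW to update a field when the
 * corresponding QED_*_MODIFY_QP_VALID_* bit is set in modify_flags; the
 * move_to_err/move_to_sqd arguments drive the special state transitions.
 */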
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp,
                                        bool move_to_err, u32 modify_flags)
{
        struct roce_modify_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (move_to_err && !qp->resp_offloaded)
                return 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_EVENT_MODIFY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc) {
                DP_NOTICE(p_hwfn, "rc = %d\n", rc);
                return rc;
        }

        p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

        p_ramrod->flags = 0;

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);

        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
        return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
                                        struct qed_rdma_qp *qp,
                                        bool move_to_sqd,
                                        bool move_to_err, u32 modify_flags)
{
        struct roce_modify_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (move_to_err && !(qp->req_offloaded))
                return 0;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_EVENT_MODIFY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc) {
                DP_NOTICE(p_hwfn, "rc = %d\n", rc);
                return rc;
        }

        p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

        p_ramrod->flags = 0;

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
                  qp->sqd_async);

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

        SET_FIELD(p_ramrod->flags,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);

        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
        p_ramrod->p_key = cpu_to_le16(qp->pkey);
        p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
        p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
        p_ramrod->mtu = cpu_to_le16(qp->mtu);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
        return rc;
}

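/* Destroying the responder returns the final CQ producer value through a
 * DMA-mapped output buffer. If the responder was never offloaded there is
 * nothing for the FW to complete, so the CID pair is released directly
 * instead of waiting for an async DESTROY_QP_DONE event.
 */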
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                                            struct qed_rdma_qp *qp,
                                            u32 *cq_prod)
{
        struct roce_destroy_qp_resp_output_params *p_ramrod_res;
        struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
        *cq_prod = qp->cq_prod;

        if (!qp->resp_offloaded) {
                /* If a responder was never offloaded, we need to free the cids
                 * allocated in create_qp as a FW async event will never arrive
                 */
                u32 cid;

                cid = qp->icid -
                      qed_cxt_get_proto_cid_start(p_hwfn,
                                                  p_hwfn->p_rdma_info->proto);
                qed_roce_free_cid_pair(p_hwfn, (u16)cid);

                return 0;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ROCE_RAMROD_DESTROY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

        p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
                               &ramrod_res_phys, GFP_KERNEL);

        if (!p_ramrod_res) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
                          rc);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }

        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
        qp->cq_prod = *cq_prod;

        /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->irq, qp->irq_phys_addr);

        qp->resp_offloaded = false;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(struct roce_destroy_qp_resp_output_params),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
                                            struct qed_rdma_qp *qp)
{
        struct roce_destroy_qp_req_output_params *p_ramrod_res;
        struct roce_destroy_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t ramrod_res_phys;
        int rc = -ENOMEM;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

        if (!qp->req_offloaded)
                return 0;

        p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
                       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(*p_ramrod_res),
                                          &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed destroy requester failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid + 1;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err;

        p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
        DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err;

        /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
                          qp->orq, qp->orq_phys_addr);

        qp->req_offloaded = false;

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
                          p_ramrod_res, ramrod_res_phys);

        return rc;
}

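/* Query is split in two: a responder ramrod reports the RQ PSN and error
 * flag, and a requester ramrod reports the SQ PSN plus error and draining
 * flags. If the QP was never offloaded the cached SW state is returned
 * without sending any ramrod.
 */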
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
                      struct qed_rdma_qp *qp,
                      struct qed_rdma_query_qp_out_params *out_params)
{
        struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
        struct roce_query_qp_req_output_params *p_req_ramrod_res;
        struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
        struct roce_query_qp_req_ramrod_data *p_req_ramrod;
        struct qed_sp_init_data init_data;
        dma_addr_t resp_ramrod_res_phys;
        dma_addr_t req_ramrod_res_phys;
        struct qed_spq_entry *p_ent;
        bool rq_err_state;
        bool sq_err_state;
        bool sq_draining;
        int rc = -ENOMEM;

        if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
                /* We can't send ramrod to the fw since this qp wasn't offloaded
                 * to FW yet
                 */
                out_params->draining = false;
                out_params->rq_psn = qp->rq_psn;
                out_params->sq_psn = qp->sq_psn;
                out_params->state = qp->cur_state;

                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
                return 0;
        }

        if (!(qp->resp_offloaded)) {
                DP_NOTICE(p_hwfn,
                          "The responder's qp should be offloaded before requester's\n");
                return -EINVAL;
        }

        /* Send a query responder ramrod to FW to get RQ-PSN and state */
        p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(*p_resp_ramrod_res),
                               &resp_ramrod_res_phys, GFP_KERNEL);
        if (!p_resp_ramrod_res) {
                DP_NOTICE(p_hwfn,
                          "qed query qp failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err_resp;

        p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
        DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err_resp;

        out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
        rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
                                 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
                          p_resp_ramrod_res, resp_ramrod_res_phys);

        if (!(qp->req_offloaded)) {
                /* Don't send query qp for the requester */
                out_params->sq_psn = qp->sq_psn;
                out_params->draining = false;

                if (rq_err_state)
                        qp->cur_state = QED_ROCE_QP_STATE_ERR;

                out_params->state = qp->cur_state;

                return 0;
        }

        /* Send a query requester ramrod to FW to get SQ-PSN and state */
        p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
                           dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              sizeof(*p_req_ramrod_res),
                                              &req_ramrod_res_phys,
                                              GFP_KERNEL);
        if (!p_req_ramrod_res) {
                rc = -ENOMEM;
                DP_NOTICE(p_hwfn,
                          "qed query qp failed: cannot allocate memory (ramrod)\n");
                return rc;
        }

        /* Get SPQ entry */
        init_data.cid = qp->icid + 1;
        rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
                                 PROTOCOLID_ROCE, &init_data);
        if (rc)
                goto err_req;

        p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
        DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err_req;

        out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
        sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
                                 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
        sq_draining =
                GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
                          ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
                          p_req_ramrod_res, req_ramrod_res_phys);

        out_params->draining = false;

        if (rq_err_state || sq_err_state)
                qp->cur_state = QED_ROCE_QP_STATE_ERR;
        else if (sq_draining)
                out_params->draining = true;
        out_params->state = qp->cur_state;

        return 0;

err_req:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
                          p_req_ramrod_res, req_ramrod_res_phys);
        return rc;
err_resp:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
                          p_resp_ramrod_res, resp_ramrod_res_phys);
        return rc;
}

int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        u32 cq_prod;
        int rc;

        /* Destroys the specified QP */
        if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
            (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
            (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
                DP_NOTICE(p_hwfn,
                          "QP must be in error, reset or init state before destroying it\n");
                return -EINVAL;
        }

        if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
                rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
                                                      &cq_prod);
                if (rc)
                        return rc;

                /* Send destroy requester ramrod */
                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
                if (rc)
                        return rc;
        }

        return 0;
}

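/* State-transition dispatcher: INIT/RESET->RTR offloads the responder,
 * RTR->RTS offloads the requester, later transitions send modify ramrods
 * to one or both sides, a move to ERR flags both sides as erred, and a
 * move to RESET tears both offloaded contexts down.
 */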
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
                       struct qed_rdma_qp *qp,
                       enum qed_roce_qp_state prev_state,
                       struct qed_rdma_modify_qp_in_params *params)
{
        int rc = 0;

        /* Perform additional operations according to the current state and the
         * next state
         */
        if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
             (prev_state == QED_ROCE_QP_STATE_RESET)) &&
            (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
                /* Init->RTR or Reset->RTR */
                rc = qed_roce_sp_create_responder(p_hwfn, qp);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* RTR-> RTS */
                rc = qed_roce_sp_create_requester(p_hwfn, qp);
                if (rc)
                        return rc;

                /* Send modify responder ramrod */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* RTS->RTS */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
                /* RTS->SQD */
                rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
                /* SQD->SQD */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);
                return rc;
        } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
                   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
                /* SQD->RTS */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
                                                  params->modify_flags);

                return rc;
        } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
                /* ->ERR */
                rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
                                                  params->modify_flags);
                if (rc)
                        return rc;

                rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
                                                  params->modify_flags);
                return rc;
        } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
                /* Any state -> RESET */
                u32 cq_prod;

                /* Send destroy responder ramrod */
                rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
                                                      qp,
                                                      &cq_prod);
                if (rc)
                        return rc;

                qp->cq_prod = cq_prod;

                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
        }

        return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
        struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
        u32 start_cid, cid, xcid;

        /* an even icid belongs to a responder while an odd icid belongs to a
         * requester. The 'cid' received as an input can be either. We calculate
         * the "partner" icid and call it xcid. Only if both are free then the
         * "cid" map can be cleared.
         */
        start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
        cid = icid - start_cid;
        xcid = cid ^ 1;

        spin_lock_bh(&p_rdma_info->lock);

        qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
        if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
                qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
                qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
        }

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u8 val;

        /* if any QPs are already active, we want to disable DPM, since their
         * context information contains information from before the latest DCBx
         * update. Otherwise enable it.
         */
        val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
        p_hwfn->dcbx_no_edpm = (u8)val;

        qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
        return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
                                         qed_roce_async_event);
}

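/* HW init for RoCE: program PRS_REG_ROCE_DEST_QP_MAX_PF to 0, select the
 * RoCE parser search register, enable the RoCE ethertype for light L2, and
 * verify that the first RoCE CID is even so the responder/requester
 * even/odd pairing used throughout this file holds.
 */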
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 ll2_ethertype_en;

        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
        qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
               (ll2_ethertype_en | 0x01));

        if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
                DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
        return 0;
}