// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
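
/* Async (event ring) completion handler registered for PROTOCOLID_ROCE:
 * releases the real CID once the FW acknowledges a DESTROY_QP, and forwards
 * SRQ empty/limit and other affiliated events to the upper-layer callbacks
 * held in p_rdma_info->events.
 */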
static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				__le16 echo, union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
	union rdma_eqe_data *rdata = &data->rdma_data;

	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
		   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
		u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);

		events.affiliated_event(events.context, fw_event_code,
					&srq_id);
	} else {
		events.affiliated_event(events.context, fw_event_code,
					(void *)&rdata->async_handle);
	}

	return 0;
}

void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little longer.
	 * We delay for a short while if an async destroy QP is still expected.
	 * Beyond the added delay we clear the bitmap anyway.
	 */
	while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
		/* If the HW device is during recovery, all resources are
		 * immediately reset without receiving a per-cid indication
		 * from HW. In this case we don't expect the cid bitmap to be
		 * cleared.
		 */
		if (p_hwfn->cdev->recov_in_prog)
			return;

		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}
}
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	int i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	switch (roce_mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;
	default:
		return MAX_ROCE_FLAVOR;
	}
}

static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
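
/* Each RoCE QP consumes two adjacent ICIDs: an even one for the responder
 * and the following odd one for the requester, so allocation hands out a
 * pair and rolls both back on failure.
 */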
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icid's should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
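
/* Resolve the traffic class for a QP's physical queues from the 802.1p
 * priority carried in its VLAN tag via the DCBx mapping; untagged QPs
 * default to TC 0.
 */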
static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u8 pri, tc = 0;

	if (qp->vlan_id) {
		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u tc: %u (vlan priority %s)\n",
		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

	return tc;
}
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	u32 flags = 0;
	int rc;
	u8 tc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
		  qed_roce_mode_to_flavor(qp->roce_mode));

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
	p_ramrod->flags = cpu_to_le32(flags);
	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);
	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->resp_offloaded = true;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}
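
/* Requester-side CREATE_QP: same flow as the responder but runs on the
 * adjacent CID (icid + 1) and allocates the ORQ (outstanding read queue)
 * instead of the IRQ.
 */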
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	u16 flags = 0;
	int rc;
	u8 tc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
		  qed_roce_mode_to_flavor(qp->roce_mode));

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
		  qp->signal_all);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
	p_ramrod->flags = cpu_to_le16(flags);

	SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
		  qp->edpm_mode);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	return rc;
}
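
/* Responder-side MODIFY_QP: the modify_flags bitmask tells the FW which
 * attributes are valid in this ramrod; move_to_err forces the QP into the
 * error state regardless of the other fields.
 */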
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags = 0;
	int rc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
	p_ramrod->flags = cpu_to_le16(flags);

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 flags = 0;
	int rc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
		  move_to_sqd);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
	p_ramrod->flags = cpu_to_le16(flags);

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
	SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}
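
/* Responder-side DESTROY_QP: the FW returns the final CQ producer through a
 * DMA-able output buffer so the caller can report it back via cq_prod; the
 * IRQ ring is freed only after the ramrod completes.
 */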
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	if (!qp->has_resp) {
		*cq_prod = 0;
		return 0;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If a responder was never offloaded, we need to free the cids
		 * allocated in create_qp as a FW async event will never arrive
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
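
/* Requester-side DESTROY_QP runs on icid + 1; the ORQ ring is freed only
 * after the ramrod completes, for the same reason as the responder's IRQ.
 */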
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return -ENOMEM;
	}

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
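
/* Query a QP by posting QUERY_QP ramrods for the responder (RQ PSN and error
 * flag) and the requester (SQ PSN, error and draining flags); the FW writes
 * the results into DMA-able output buffers that are read back here.
 */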
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send ramrod to the fw since this qp wasn't offloaded
		 * to FW yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}
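
/* Tear down a QP that is already in RESET, ERR or INIT: destroy the
 * responder side first (harvesting the final CQ producer) and then the
 * requester side.
 */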
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
		if (rc)
			return rc;
	}

	return 0;
}
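
/* Drive the RoCE QP state machine: the responder is created on the
 * INIT/RESET->RTR transition and the requester on RTR->RTS; later
 * transitions issue modify ramrods, and moving back to RESET destroys both
 * sides and releases their resources.
 */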
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);

	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);

	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);

	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);

	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &cq_prod);
		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free then the
	 * "cid" map can be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}
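
/* Per-PF RoCE HW init: zero PRS_REG_ROCE_DEST_QP_MAX_PF, point the RDMA
 * parser search at the RoCE register and enable the RoCE ethertype for
 * light-L2 traffic. The first RoCE CID must be even so that the
 * responder/requester CID pairs stay aligned.
 */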
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}