// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright 2021 Marvell. All rights reserved. */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/qed/qed_nvmetcp_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_nvmetcp.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_sp.h"
#include "qed_reg_addr.h"
#include "qed_nvmetcp_fw_funcs.h"

static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
				   u16 echo, union event_ring_data *data,
				   u8 fw_return_code)
{
	if (p_hwfn->p_nvmetcp_info->event_cb) {
		struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;

		return p_nvmetcp->event_cb(p_nvmetcp->event_context,
					   fw_event_code, data);
	} else {
		DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");

		return -EINVAL;
	}
}

static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
				     enum spq_mode comp_mode,
				     struct qed_spq_comp_cb *p_comp_addr,
				     void *event_context,
				     nvmetcp_event_cb_t async_event_cb)
{
	struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct scsi_init_func_queues *p_queue = NULL;
	struct nvmetcp_spe_func_init *p_init = NULL;
	struct qed_sp_init_data init_data = {};
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;
	u16 val;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_init;
	p_init = &p_ramrod->nvmetcp_init_spe;
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_queue = &p_init->q_params;
	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
	p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
					p_params->ll2_ooo_queue_id;
	SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
	p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
	p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
	p_init->debug_flags = p_params->debug_mode;
	DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
		       p_params->glbl_q_params_addr);
	p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
	p_queue->num_queues = p_params->num_queues;
	val = RESC_START(p_hwfn, QED_CMDQS_CQS);
	p_queue->queue_relative_offset = cpu_to_le16((u16)val);
	p_queue->cq_sb_pi = p_params->gl_rq_pi;

	for (i = 0; i < p_params->num_queues; i++) {
		val = qed_get_igu_sb_id(p_hwfn, i);
		p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
	}

	SET_FIELD(p_queue->q_validity,
		  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
	p_queue->cmdq_num_entries = 0;
	p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
	p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
	p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
	SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
		  NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
	p_hwfn->p_nvmetcp_info->event_context = event_context;
	p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
				  qed_nvmetcp_async_event);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

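/* Post the DESTROY_FUNC ramrod and unregister the async event callback. */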
static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
				    enum spq_mode comp_mode,
				    struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);

	return rc;
}

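/* Report NVMeTCP device capabilities (port id, number of CQs) on top of the
 * common device info.
 */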
static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
				     struct qed_dev_nvmetcp_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);
	info->port_id = MFW_PORT(hwfn);
	info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);

	return rc;
}

static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
				     struct qed_nvmetcp_cb_ops *ops,
				     void *cookie)
{
	cdev->protocol_ops.nvmetcp = ops;
	cdev->ops_cookie = cookie;
}

static int qed_nvmetcp_stop(struct qed_dev *cdev)
{
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "nvmetcp already stopped\n");

		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop nvmetcp - not all connections were returned\n");

		return -EINVAL;
	}

	/* Stop the nvmetcp */
	rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				      NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

	return rc;
}

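/* Start the NVMeTCP PF: post the function-start ramrod, init the connection
 * hash table and, if the caller provided a tasks struct, report the task
 * (TID) memory layout so the upper layer can initialize task contexts.
 */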
static int qed_nvmetcp_start(struct qed_dev *cdev,
			     struct qed_nvmetcp_tid *tasks,
			     void *event_context,
			     nvmetcp_event_cb_t async_event_cb)
{
	struct qed_tid_mem *tid_info;
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "nvmetcp already started;\n");

		return 0;
	}

	rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
				       QED_SPQ_MODE_EBLOCK, NULL,
				       event_context, async_event_cb);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start nvmetcp\n");

		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (!tasks)
		return 0;

	tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
	if (!tid_info) {
		qed_nvmetcp_stop(cdev);

		return -ENOMEM;
	}

	rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
	if (rc) {
		DP_NOTICE(cdev, "Failed to gather task information\n");
		qed_nvmetcp_stop(cdev);
		kfree(tid_info);

		return rc;
	}

	/* Fill task information */
	tasks->size = tid_info->tid_size;
	tasks->num_tids_per_block = tid_info->num_tids_per_block;
	memcpy(tasks->blocks, tid_info->blocks,
	       MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
	kfree(tid_info);

	return 0;
}

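/* Look up an offloaded connection by its icid handle in the cdev hash table. */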
static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
							 u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || hash_con->con->icid != handle)
		return NULL;

	return hash_con;
}

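/* Post the OFFLOAD_CONN ramrod: assigns the transmission and pure-ACK PQs
 * and hands the SQ/R2TQ/XHQ/UHQ PBLs, the CCCID-iTID table and the full
 * TCP tuple/timer state to firmware.
 */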
static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
				       struct qed_nvmetcp_conn *p_conn,
				       enum spq_mode comp_mode,
				       struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
	struct tcp_offload_params_opt2 *p_tcp = NULL;
	struct qed_sp_init_data init_data = { 0 };
	struct qed_spq_entry *p_ent = NULL;
	dma_addr_t r2tq_pbl_addr;
	dma_addr_t xhq_pbl_addr;
	dma_addr_t uhq_pbl_addr;
	u16 physical_q;
	int rc = 0;
	u8 i;

	/* Get SPQ entry */
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;

	/* Transmission PQ is the first of the PF */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);

	/* nvmetcp Pure-ACK PQ */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_conn->physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
	r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
	xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
	uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
	p_ramrod->nvmetcp.flags = p_conn->offl_flags;
	p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
	p_ramrod->nvmetcp.initial_ack = 0;
	DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
		       p_conn->nvmetcp_cccid_itid_table_addr);
	p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
		cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
	p_tcp = &p_ramrod->tcp;
	qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
			    &p_tcp->remote_mac_addr_mid,
			    &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
	qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
			    &p_tcp->local_mac_addr_mid,
			    &p_tcp->local_mac_addr_lo, p_conn->local_mac);
	p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
	p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
	p_tcp->ip_version = p_conn->ip_version;
	if (p_tcp->ip_version == TCP_IPV6) {
		for (i = 0; i < 4; i++) {
			p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
			p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
		}
	} else {
		p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
		p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
	}

	p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
	p_tcp->ttl = p_conn->ttl;
	p_tcp->tos_or_tc = p_conn->tos_or_tc;
	p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
	p_tcp->local_port = cpu_to_le16(p_conn->local_port);
	p_tcp->mss = cpu_to_le16(p_conn->mss);
	p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
	p_tcp->connect_mode = p_conn->connect_mode;
	p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
	p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
	p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
	p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
	p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

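/* Post the UPDATE_CONN ramrod with negotiated PDU limits and update flags. */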
static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
				      struct qed_nvmetcp_conn *p_conn,
				      enum spq_mode comp_mode,
				      struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u32 dval;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->flags = p_conn->update_flag;
	p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
	dval = p_conn->max_recv_pdu_length;
	p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
	dval = p_conn->max_send_pdu_length;
	p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
	p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
					 struct qed_nvmetcp_conn *p_conn,
					 enum spq_mode comp_mode,
					 struct qed_spq_comp_cb *p_comp_addr)
{
	struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
	p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
	p_ramrod->abortive = p_conn->abortive_dsconnect;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn,
					enum spq_mode comp_mode,
					struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
				 PROTOCOLID_TCP_ULP, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

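/* Compute the doorbell BAR address for a given connection CID. */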
static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
			     qed_db_addr(cid, DQ_DEMS_LEGACY);
}

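/* Reuse a connection from the free list when possible; otherwise allocate a
 * new one along with its R2TQ, UHQ and XHQ chains.
 */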
static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
	};
	struct qed_nvmetcp_pf_params *p_params = NULL;
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;

	/* Try finding a free connection that can be used */
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
		*p_out_conn = p_conn;

		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

	/* Need to allocate a new connection */
	p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	params.num_elems = p_params->num_r2tq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
	params.elem_size = sizeof(struct nvmetcp_wqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
	if (rc)
		goto nomem_r2tq;

	params.num_elems = p_params->num_uhq_pages_in_ring *
			   QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
	params.elem_size = sizeof(struct iscsi_uhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
	if (rc)
		goto nomem_uhq;

	params.elem_size = sizeof(struct iscsi_xhqe);
	rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
	if (rc)
		goto nomem;

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;

	return 0;

nomem:
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
nomem_uhq:
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
nomem_r2tq:
	kfree(p_conn);

	return -ENOMEM;
}

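/* Acquire a CID from the context manager and bind it to a (possibly
 * recycled) connection object.
 */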
static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
					  struct qed_nvmetcp_conn **p_out_conn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
	if (rc)
		return rc;

	rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
	if (rc) {
		spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
		qed_cxt_release_cid(p_hwfn, icid);
		spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);

		return rc;
	}

	p_conn->icid = icid;
	p_conn->conn_id = (u16)icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
					   struct qed_nvmetcp_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
}

static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
					struct qed_nvmetcp_conn *p_conn)
{
	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
	kfree(p_conn);
}

int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_info *p_nvmetcp_info;

	p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
	if (!p_nvmetcp_info)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
	p_hwfn->p_nvmetcp_info = p_nvmetcp_info;

	return 0;
}

void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
{
	spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
}

void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
{
	struct qed_nvmetcp_conn *p_conn = NULL;

	if (!p_hwfn->p_nvmetcp_info)
		return;

	while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
					  struct qed_nvmetcp_conn, list_entry);
		if (p_conn) {
			list_del(&p_conn->list_entry);
			qed_nvmetcp_free_connection(p_hwfn, p_conn);
		}
	}

	kfree(p_hwfn->p_nvmetcp_info);
	p_hwfn->p_nvmetcp_info = NULL;
}

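/* Upper-layer entry point: acquire a connection, publish it in the hash
 * table and hand back its handle, fw_cid and doorbell address.
 */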
static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
				    u32 *handle,
				    u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_nvmetcp_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
	if (!hash_con)
		return -ENOMEM;

	/* Acquire the connection */
	rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
					    &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);

		return rc;
	}

	/* Added the connection to hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	*p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
					      *handle);

	return 0;
}

static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
				    struct qed_nvmetcp_params_offload *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	/* FW initializations */
	con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
	con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
	con->default_cq = conn_info->default_cq;
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
	SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);

	/* Networking and TCP stack initializations */
	ether_addr_copy(con->local_mac, conn_info->src.mac);
	ether_addr_copy(con->remote_mac, conn_info->dst.mac);
	memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
	memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
	con->local_port = conn_info->src.port;
	con->remote_port = conn_info->dst.port;
	con->vlan_id = conn_info->vlan_id;

	if (conn_info->timestamp_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);

	if (conn_info->delayed_ack_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);

	if (conn_info->tcp_keep_alive_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);

	if (conn_info->ecn_en)
		SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);

	con->ip_version = conn_info->ip_version;
	con->flow_label = QED_TCP_FLOW_LABEL;
	con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
	con->ka_timeout = conn_info->ka_timeout;
	con->ka_interval = conn_info->ka_interval;
	con->max_rt_time = conn_info->max_rt_time;
	con->ttl = conn_info->ttl;
	con->tos_or_tc = conn_info->tos_or_tc;
	con->mss = conn_info->mss;
	con->cwnd = conn_info->cwnd;
	con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
	con->connect_mode = 0;

	return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
					   QED_SPQ_MODE_EBLOCK, NULL);
}

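/* Upper-layer entry point: translate negotiated connection parameters into
 * update flags and post the connection-update ramrod.
 */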
static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
				   u32 handle,
				   struct qed_nvmetcp_params_update *conn_info)
{
	struct qed_hash_nvmetcp_con *hash_con;
	struct qed_nvmetcp_conn *con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
	SET_FIELD(con->update_flag,
		  ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
	if (conn_info->hdr_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);

	if (conn_info->data_digest_en)
		SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);

	/* Placeholder - initialize pfv, cpda, hpda */

	con->max_seq_size = conn_info->max_io_size;
	con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
	con->max_send_pdu_length = conn_info->max_send_pdu_length;
	con->first_seq_length = conn_info->max_io_size;

	return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
					  QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
					    QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
				    u32 handle, u8 abrt_conn)
{
	struct qed_hash_nvmetcp_con *hash_con;

	hash_con = qed_nvmetcp_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);

		return -EINVAL;
	}

	hash_con->con->abortive_dsconnect = abrt_conn;

	return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
					     QED_SPQ_MODE_EBLOCK, NULL);
}

static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_nvmetcp_dev_info,
	.register_ops = &qed_register_nvmetcp_ops,
	.start = &qed_nvmetcp_start,
	.stop = &qed_nvmetcp_stop,
	.acquire_conn = &qed_nvmetcp_acquire_conn,
	.release_conn = &qed_nvmetcp_release_conn,
	.offload_conn = &qed_nvmetcp_offload_conn,
	.update_conn = &qed_nvmetcp_update_conn,
	.destroy_conn = &qed_nvmetcp_destroy_conn,
	.clear_sq = &qed_nvmetcp_clear_conn_sq,
	.add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
	.remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
	.add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
	.remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
	.clear_all_filters = &qed_llh_clear_all_filters,
	.init_read_io = &init_nvmetcp_host_read_task,
	.init_write_io = &init_nvmetcp_host_write_task,
	.init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
	.init_task_cleanup = &init_cleanup_task_nvmetcp,
};

const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
{
	return &qed_nvmetcp_ops_pass;
}
EXPORT_SYMBOL(qed_get_nvmetcp_ops);

void qed_put_nvmetcp_ops(void)
{
}
EXPORT_SYMBOL(qed_put_nvmetcp_ops);