WIP FPC-III support
[linux/fpc-iii.git] / drivers / net / ethernet / qlogic / qed / qed_fcoe.c
blobb768f0698170e43e3e0cd7a7cb39f87367511ba7
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <asm/param.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/interrupt.h>
13 #include <linux/kernel.h>
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/stddef.h>
19 #include <linux/string.h>
20 #include <linux/workqueue.h>
21 #include <linux/errno.h>
22 #include <linux/list.h>
23 #include <linux/spinlock.h>
24 #define __PREVENT_DUMP_MEM_ARR__
25 #define __PREVENT_PXP_GLOBAL_WIN__
26 #include "qed.h"
27 #include "qed_cxt.h"
28 #include "qed_dev_api.h"
29 #include "qed_fcoe.h"
30 #include "qed_hsi.h"
31 #include "qed_hw.h"
32 #include "qed_int.h"
33 #include "qed_ll2.h"
34 #include "qed_mcp.h"
35 #include "qed_reg_addr.h"
36 #include "qed_sp.h"
37 #include "qed_sriov.h"
38 #include <linux/qed/qed_fcoe_if.h>
/* Per-connection FCoE state tracked by the qed core.
 *
 * Instances are recycled through qed_fcoe_info::free_list; see
 * qed_fcoe_acquire_connection() / qed_fcoe_release_connection().
 */
struct qed_fcoe_conn {
	struct list_head list_entry;	/* link in p_fcoe_info->free_list */
	bool free_on_delete;		/* set when allocated by qed itself */

	u16 conn_id;			/* connection id sent in offload ramrod */
	u32 icid;			/* context id; also the caller-visible handle */
	u32 fw_cid;			/* (opaque_fid << 16) | icid */
	u8 layer_code;

	/* Send queue: PBL plus current/next page DMA addresses */
	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	/* Transfer queue: one PBL page and 4 data pages (DMA + virt) */
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	/* Confirmation queue: one PBL page and 2 data pages (DMA + virt) */
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	/* DMA address handed to the TERMINATE_CONN ramrod */
	dma_addr_t terminate_params;

	/* MAC addresses split into three 16-bit little-endian words */
	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;	/* TX physical queue, filled at offload time */

	struct fc_addr_nw s_id;	/* source FC address */
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;	/* destination FC address */
	u8 flags;
	u8 def_q_idx;
};
84 static int
85 qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
86 enum spq_mode comp_mode,
87 struct qed_spq_comp_cb *p_comp_addr)
89 struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
90 struct fcoe_init_ramrod_params *p_ramrod = NULL;
91 struct fcoe_init_func_ramrod_data *p_data;
92 struct e4_fcoe_conn_context *p_cxt = NULL;
93 struct qed_spq_entry *p_ent = NULL;
94 struct qed_sp_init_data init_data;
95 struct qed_cxt_info cxt_info;
96 u32 dummy_cid;
97 int rc = 0;
98 __le16 tmp;
99 u8 i;
101 /* Get SPQ entry */
102 memset(&init_data, 0, sizeof(init_data));
103 init_data.cid = qed_spq_get_cid(p_hwfn);
104 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
105 init_data.comp_mode = comp_mode;
106 init_data.p_comp_data = p_comp_addr;
108 rc = qed_sp_init_request(p_hwfn, &p_ent,
109 FCOE_RAMROD_CMD_ID_INIT_FUNC,
110 PROTOCOLID_FCOE, &init_data);
111 if (rc)
112 return rc;
114 p_ramrod = &p_ent->ramrod.fcoe_init;
115 p_data = &p_ramrod->init_ramrod_data;
116 fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
118 /* Sanity */
119 if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
120 DP_ERR(p_hwfn,
121 "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
122 fcoe_pf_params->num_cqs,
123 p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
124 rc = -EINVAL;
125 goto err;
128 p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
129 tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
130 p_data->sq_num_pages_in_pbl = tmp;
132 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
133 if (rc)
134 goto err;
136 cxt_info.iid = dummy_cid;
137 rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
138 if (rc) {
139 DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
140 dummy_cid);
141 goto err;
143 p_cxt = cxt_info.p_cxt;
144 memset(p_cxt, 0, sizeof(*p_cxt));
146 SET_FIELD(p_cxt->tstorm_ag_context.flags3,
147 E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
149 fcoe_pf_params->dummy_icid = (u16)dummy_cid;
151 tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
152 p_data->func_params.num_tasks = tmp;
153 p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
154 p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
156 DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
157 fcoe_pf_params->glbl_q_params_addr);
159 tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
160 p_data->q_params.cq_num_entries = tmp;
162 tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
163 p_data->q_params.cmdq_num_entries = tmp;
165 p_data->q_params.num_queues = fcoe_pf_params->num_cqs;
167 tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
168 p_data->q_params.queue_relative_offset = (__force u8)tmp;
170 for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
171 tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i));
172 p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
175 p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
176 p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
178 p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
180 DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
181 fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
182 p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
183 fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
184 tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]);
185 p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp;
186 tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]);
187 p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp;
189 DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
190 fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
191 p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
192 fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
193 tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]);
194 p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp;
195 tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]);
196 p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp;
197 tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size);
198 p_data->q_params.rq_buffer_size = tmp;
200 if (fcoe_pf_params->is_target) {
201 SET_FIELD(p_data->q_params.q_validity,
202 SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
203 if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
204 SET_FIELD(p_data->q_params.q_validity,
205 SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
206 SET_FIELD(p_data->q_params.q_validity,
207 SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
208 } else {
209 SET_FIELD(p_data->q_params.q_validity,
210 SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
213 rc = qed_spq_post(p_hwfn, p_ent, NULL);
215 return rc;
217 err:
218 qed_sp_destroy_request(p_hwfn, p_ent);
219 return rc;
222 static int
223 qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
224 struct qed_fcoe_conn *p_conn,
225 enum spq_mode comp_mode,
226 struct qed_spq_comp_cb *p_comp_addr)
228 struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
229 struct fcoe_conn_offload_ramrod_data *p_data;
230 struct qed_spq_entry *p_ent = NULL;
231 struct qed_sp_init_data init_data;
232 u16 physical_q0;
233 __le16 tmp;
234 int rc;
236 /* Get SPQ entry */
237 memset(&init_data, 0, sizeof(init_data));
238 init_data.cid = p_conn->icid;
239 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
240 init_data.comp_mode = comp_mode;
241 init_data.p_comp_data = p_comp_addr;
243 rc = qed_sp_init_request(p_hwfn, &p_ent,
244 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
245 PROTOCOLID_FCOE, &init_data);
246 if (rc)
247 return rc;
249 p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
250 p_data = &p_ramrod->offload_ramrod_data;
252 /* Transmission PQ is the first of the PF */
253 physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
254 p_conn->physical_q0 = physical_q0;
255 p_data->physical_q0 = cpu_to_le16(physical_q0);
257 p_data->conn_id = cpu_to_le16(p_conn->conn_id);
258 DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
259 DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
260 DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
261 DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
262 DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
263 DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
265 DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
266 DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
267 DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
269 p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
270 p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
271 p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
272 p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
273 p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
274 p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
276 tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
277 p_data->tx_max_fc_pay_len = tmp;
278 tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
279 p_data->e_d_tov_timer_val = tmp;
280 tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
281 p_data->rec_rr_tov_timer_val = tmp;
282 tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
283 p_data->rx_max_fc_pay_len = tmp;
285 p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
286 p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
287 p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
288 p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
289 p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
290 p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
291 p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
292 p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
293 p_data->flags = p_conn->flags;
294 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
295 SET_FIELD(p_data->flags,
296 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
297 p_data->def_q_idx = p_conn->def_q_idx;
299 return qed_spq_post(p_hwfn, p_ent, NULL);
/* Post the TERMINATE_CONN ramrod for @p_conn.
 *
 * Firmware writes the termination results into the buffer whose DMA
 * address was stored in p_conn->terminate_params (see
 * qed_fcoe_destroy_conn()).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Post the DESTROY_FUNC ramrod that stops the FCoE function.
 *
 * Uses the dummy icid that qed_sp_fcoe_func_start() stored in
 * fcoe_pf_params->dummy_icid as the request cid, and disables the FCoE
 * task segment in the timers block before posting the ramrod.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	/* Clear the FCoE tid segment's enable bit in the timers block */
	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Allocate a qed_fcoe_conn, either by reusing one from the free-list or
 * by allocating a fresh one together with its XFERQ/CONFQ DMA memory.
 *
 * Each queue gets one PBL page plus its data pages; each data page's DMA
 * address is written into the corresponding PBL slot. On failure all
 * partial allocations are unwound through the goto ladder.
 *
 * Return: 0 with *p_out_conn set on success, -ENOMEM on failure.
 */
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	/* Fast path: reuse a previously released connection */
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	/* XFERQ PBL page */
	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	/* XFERQ data pages; record each DMA address in the PBL */
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	/* CONFQ PBL page */
	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	/* CONFQ data pages; record each DMA address in the PBL */
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}
458 static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
459 struct qed_fcoe_conn *p_conn)
461 u32 i;
463 if (!p_conn)
464 return;
466 if (p_conn->confq_pbl_addr_virt_addr)
467 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
468 QED_CHAIN_PAGE_SIZE,
469 p_conn->confq_pbl_addr_virt_addr,
470 p_conn->confq_pbl_addr);
472 for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
473 if (!p_conn->confq_addr_virt_addr[i])
474 continue;
475 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
476 QED_CHAIN_PAGE_SIZE,
477 p_conn->confq_addr_virt_addr[i],
478 p_conn->confq_addr[i]);
481 if (p_conn->xferq_pbl_addr_virt_addr)
482 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
483 QED_CHAIN_PAGE_SIZE,
484 p_conn->xferq_pbl_addr_virt_addr,
485 p_conn->xferq_pbl_addr);
487 for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
488 if (!p_conn->xferq_addr_virt_addr[i])
489 continue;
490 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
491 QED_CHAIN_PAGE_SIZE,
492 p_conn->xferq_addr_virt_addr[i],
493 p_conn->xferq_addr[i]);
495 kfree(p_conn);
/* Return the doorbell address (legacy DEMS) for connection @cid. */
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}
504 static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
505 u8 bdq_id)
507 if (RESC_NUM(p_hwfn, QED_BDQ)) {
508 return (u8 __iomem *)p_hwfn->regview +
509 GTT_BAR0_MAP_REG_MSDM_RAM +
510 MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
511 QED_BDQ),
512 bdq_id);
513 } else {
514 DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
515 return NULL;
519 static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
520 u8 bdq_id)
522 if (RESC_NUM(p_hwfn, QED_BDQ)) {
523 return (u8 __iomem *)p_hwfn->regview +
524 GTT_BAR0_MAP_REG_TSDM_RAM +
525 TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
526 QED_BDQ),
527 bdq_id);
528 } else {
529 DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
530 return NULL;
/* Allocate the per-hwfn FCoE bookkeeping structure and its free-list.
 *
 * Counterpart of qed_fcoe_free().
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate LL2's set struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}
/* Initialize the FCoE task contexts in working memory.
 *
 * For every configured task id: clear the context, mark timer logical
 * clients 0 and 1 valid, and set the connection-type bit in the tstorm
 * aggregative context. Tasks whose context cannot be fetched are skipped.
 * Also initializes the fcoe_info spinlock.
 */
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	u32 i, lc;
	int rc;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));

		/* Mark both timer logical clients valid */
		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
		p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc);

		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1);
		p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);

		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}
579 void qed_fcoe_free(struct qed_hwfn *p_hwfn)
581 struct qed_fcoe_conn *p_conn = NULL;
583 if (!p_hwfn->p_fcoe_info)
584 return;
586 while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
587 p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
588 struct qed_fcoe_conn, list_entry);
589 if (!p_conn)
590 break;
591 list_del(&p_conn->list_entry);
592 qed_fcoe_free_connection(p_hwfn, p_conn);
595 kfree(p_hwfn->p_fcoe_info);
596 p_hwfn->p_fcoe_info = NULL;
599 static int
600 qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
601 struct qed_fcoe_conn *p_in_conn,
602 struct qed_fcoe_conn **p_out_conn)
604 struct qed_fcoe_conn *p_conn = NULL;
605 int rc = 0;
606 u32 icid;
608 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
609 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
610 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
611 if (rc)
612 return rc;
614 /* Use input connection [if provided] or allocate a new one */
615 if (p_in_conn) {
616 p_conn = p_in_conn;
617 } else {
618 rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
619 if (rc) {
620 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
621 qed_cxt_release_cid(p_hwfn, icid);
622 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
623 return rc;
627 p_conn->icid = icid;
628 p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
629 *p_out_conn = p_conn;
631 return rc;
634 static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
635 struct qed_fcoe_conn *p_conn)
637 spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
638 list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
639 qed_cxt_release_cid(p_hwfn, p_conn->icid);
640 spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
/* Read the TSTORM (RX) firmware statistics for this PF into @p_stats.
 *
 * Copies the raw counters out of storm RAM and converts them from
 * little-endian firmware format to host-endian values.
 */
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	/* 64-bit counters stored as hi/lo register pairs */
	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	/* 32-bit drop counters */
	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}
/* Read the PSTORM (TX) firmware statistics for this PF into @p_stats.
 *
 * Same scheme as _qed_fcoe_get_tstats(): copy raw counters from storm
 * RAM and convert the hi/lo register pairs to host-endian 64-bit values.
 */
static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}
/* Collect RX + TX FCoE firmware statistics into @p_stats.
 *
 * Acquires a PTT window for the register reads and releases it when done.
 *
 * Return: 0 on success, -EINVAL if no PTT could be acquired.
 */
static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	/* RX counters from tstorm, TX counters from pstorm */
	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
/* Hash-table node mapping a connection handle (icid) to its
 * qed_fcoe_conn; stored in cdev->connections.
 */
struct qed_hash_fcoe_con {
	struct hlist_node node;		/* entry in cdev->connections */
	struct qed_fcoe_conn *con;	/* the tracked connection */
};
/* Fill @info with the device capabilities the FCoE protocol driver needs:
 * common device info, BDQ producer addresses, WWPN/WWNN and CQ count.
 *
 * NOTE(review): the rc of qed_fill_dev_info() is only propagated at the
 * end; the remaining fields are filled even if it failed — confirm
 * callers treat a non-zero rc as fatal.
 */
static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	/* World-wide port/node names come from the management firmware */
	info->wwpn = hwfn->mcp_info->func_info.wwn_port;
	info->wwnn = hwfn->mcp_info->func_info.wwn_node;

	info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);

	return rc;
}
/* Register the protocol driver's callbacks and its private cookie;
 * consumed by qed_get_protocol_stats_fcoe().
 */
static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}
752 static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
753 u32 handle)
755 struct qed_hash_fcoe_con *hash_con = NULL;
757 if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
758 return NULL;
760 hash_for_each_possible(cdev->connections, hash_con, node, handle) {
761 if (hash_con->con->icid == handle)
762 break;
765 if (!hash_con || (hash_con->con->icid != handle))
766 return NULL;
768 return hash_con;
771 static int qed_fcoe_stop(struct qed_dev *cdev)
773 struct qed_ptt *p_ptt;
774 int rc;
776 if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
777 DP_NOTICE(cdev, "fcoe already stopped\n");
778 return 0;
781 if (!hash_empty(cdev->connections)) {
782 DP_NOTICE(cdev,
783 "Can't stop fcoe - not all connections were returned\n");
784 return -EINVAL;
787 p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
788 if (!p_ptt)
789 return -EAGAIN;
791 /* Stop the fcoe */
792 rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
793 QED_SPQ_MODE_EBLOCK, NULL);
794 cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
795 qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);
797 return rc;
800 static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
802 int rc;
804 if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
805 DP_NOTICE(cdev, "fcoe already started;\n");
806 return 0;
809 rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
810 NULL);
811 if (rc) {
812 DP_NOTICE(cdev, "Failed to start fcoe\n");
813 return rc;
816 cdev->flags |= QED_FLAG_STORAGE_STARTED;
817 hash_init(cdev->connections);
819 if (tasks) {
820 struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
821 GFP_ATOMIC);
823 if (!tid_info) {
824 DP_NOTICE(cdev,
825 "Failed to allocate tasks information\n");
826 qed_fcoe_stop(cdev);
827 return -ENOMEM;
830 rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
831 if (rc) {
832 DP_NOTICE(cdev, "Failed to gather task information\n");
833 qed_fcoe_stop(cdev);
834 kfree(tid_info);
835 return rc;
838 /* Fill task information */
839 tasks->size = tid_info->tid_size;
840 tasks->num_tids_per_block = tid_info->num_tids_per_block;
841 memcpy(tasks->blocks, tid_info->blocks,
842 MAX_TID_BLOCKS_FCOE * sizeof(u8 *));
844 kfree(tid_info);
847 return 0;
/* qed_fcoe_ops::acquire_conn callback.
 *
 * Allocates a hash node, acquires a connection (icid + queue memory),
 * publishes it in cdev->connections keyed by icid, and hands back the
 * handle, fw_cid and (optionally) the connection's doorbell address.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Added the connection to hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
						   *handle);

	return 0;
}
/* qed_fcoe_ops::release_conn callback.
 *
 * Removes the connection identified by @handle from the hash table,
 * returns it to the free-list (releasing its icid) and frees the node.
 *
 * Return: 0 on success, -EINVAL if the handle is unknown.
 */
static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}
/* qed_fcoe_ops::offload_conn callback.
 *
 * Copies the caller-supplied parameters into the tracked connection and
 * posts the offload ramrod. MAC addresses are repacked from byte arrays
 * into 16-bit words, low byte first (mac[1]<<8 | mac[0] -> addr_lo, etc.).
 *
 * Return: 0 on success, -EINVAL for an unknown handle, or the ramrod's
 * error code.
 */
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	/* Pack the 6-byte MACs into three 16-bit words each */
	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	/* Source and destination FC addresses */
	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}
/* qed_fcoe_ops::destroy_conn callback.
 *
 * Records the DMA address of the caller's termination buffer and posts
 * the TERMINATE_CONN ramrod for the connection identified by @handle.
 *
 * Return: 0 on success, -EINVAL for an unknown handle, or the ramrod's
 * error code.
 */
static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}
/* qed_fcoe_ops::get_stats callback - fetch stats from the affined hwfn. */
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
/* Aggregate FCoE statistics for the management firmware.
 *
 * Pulls the firmware counters, folds them into @stats (rx/tx packet
 * totals and CRC drops), then lets the registered protocol driver fill
 * in the login-failure count via its callback, if any.
 */
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
	    proto_stats.fcoe_rx_xfer_pkt_cnt +
	    proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
	    proto_stats.fcoe_tx_xfer_pkt_cnt +
	    proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}
/* Operations table handed to the FCoE protocol driver (qedf) through
 * qed_get_fcoe_ops().
 */
static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};
/* Return the qed FCoE operations table (see qed_fcoe_ops_pass). */
const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);
/* Counterpart of qed_get_fcoe_ops(); currently nothing to release. */
void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);