drivers/net/ethernet/qlogic/qed/qed_fcoe.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};
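
/* Post the FCoE function-start ramrod: validate the requested CQ count,
 * reserve a dummy CID for the PF and fill the function/queue/BDQ parameters
 * before handing them to the firmware.
 */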
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct e4_fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	/* Sanity */
	if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
		DP_ERR(p_hwfn,
		       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
		       fcoe_pf_params->num_cqs,
		       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
		rc = -EINVAL;
		goto err;
	}

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		goto err;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		goto err;
	}
	p_cxt = cxt_info.p_cxt;
	memset(p_cxt, 0, sizeof(*p_cxt));

	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;

err:
	qed_sp_destroy_request(p_hwfn, p_ent);
	return rc;
}
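
/* Post a connection-offload ramrod carrying the connection's SQ/XFERQ/CONFQ
 * page addresses, MAC and FC addresses, timer values and flags.
 */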
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		SET_FIELD(p_data->flags,
			  FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
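
/* Post a connection-terminate ramrod for the given connection. */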
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
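
/* Post the FCoE function-destroy ramrod and disable the FCoE task segment
 * in the timers block.
 */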
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
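
/* Reuse a connection from the per-hwfn free list when possible; otherwise
 * allocate a new one along with its XFERQ/CONFQ pages and PBLs.
 */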
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}
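
/* Free all DMA memory owned by a connection and the connection itself. */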
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	    qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								   QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								   QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}
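
/* Allocate the per-hwfn FCoE info structure and its free-connection list. */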
int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the function's FCoE info structure */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}
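
/* Clear and initialize the working-memory task contexts used for FCoE tasks. */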
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}
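
/* Release every connection left on the free list and the FCoE info struct. */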
void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}
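
/* Acquire a CID for a connection; reuse the caller-supplied connection if
 * provided, otherwise allocate a new one.
 */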
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	info->wwpn = hwfn->mcp_info->func_info.wwn_port;
	info->wwnn = hwfn->mcp_info->func_info.wwn_node;

	info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}
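
/* Upper-layer callback: stop the FCoE PF once all connections are returned. */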
static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);

	return rc;
}
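
/* Upper-layer callback: start the FCoE PF and, when requested, report the
 * task (TID) memory layout back to the caller.
 */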
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
				    NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
}
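
/* Aggregate the firmware FCoE statistics into the MCP statistics format and
 * let the protocol driver fill in the login-failure count.
 */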
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);