/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with 57712 FCoE firmware.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);

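/*
 * The request routines below build kernel work queue entries (KWQEs) and
 * hand them to the cnic driver through hba->cnic->submit_kwqes(); the
 * 57712 firmware answers asynchronously with KCQEs that are dispatched
 * from bnx2fc_indicate_kcqe() further down in this file.
 */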
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:	adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiates the initial handshake
 *	with the f/w.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = hba->max_tasks;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	/*
	 * enable both cached connection and cached tasks
	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
	 */
	fcoe_init3.perf_config = 3;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

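/*
 * bnx2fc_send_fw_fcoe_destroy_msg - send the FCoE function destroy KWQE
 *
 * @hba:	adapter structure pointer
 *
 * Counterpart of the init handshake above; tears down the FCoE function
 * in the firmware.
 */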
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
				 struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/

	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
				    struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

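/*
 * bnx2fc_process_unsol_compl - handle an unsolicited CQE
 *
 * Unsolicited CQEs carry either an L2 FC frame, an error report or a
 * warning report; the payload is pulled out of the connection's RQ.
 */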
static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	int xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data)
			buf = rq_data;
		else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > hba->max_xid) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64)((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > hba->max_xid) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & ((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
			     unsigned char *rq_data, u8 num_rq,
			     struct fcoe_task_ctx_entry *task)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;

	spin_lock_bh(&tgt->tgt_lock);

	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
						      rq_data);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

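/*
 * bnx2fc_arm_cq - ring the RX doorbell so the firmware raises the next
 * CQ event; the current consumer index and toggle bit are written out
 * as a single 32-bit doorbell message.
 */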
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
}

static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
					     unsigned char *rq_data, u8 num_rq,
					     struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	work->num_rq = num_rq;
	work->task = task;
	if (rq_data)
		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);

	return work;
}

/* Pending work request completion */
static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
	unsigned int cpu = wqe % num_possible_cpus();
	struct bnx2fc_percpu_s *fps;
	struct bnx2fc_work *work;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	unsigned char *rq_data = NULL;
	unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
	int task_idx, index;
	u16 xid;
	u8 num_rq;
	int i;

	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= hba->max_tasks) {
		pr_err(PFX "ERROR:xid out of range\n");
		return false;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &task_page[index];

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		  FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);

	if (!num_rq)
		goto num_rq_zero;

	rq_data = bnx2fc_get_next_rqe(tgt, 1);

	if (num_rq > 1) {
		/* We do not need extra sense data */
		for (i = 1; i < num_rq; i++)
			bnx2fc_get_next_rqe(tgt, 1);
	}

	if (rq_data)
		memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);

	/* return RQ entries */
	for (i = 0; i < num_rq; i++)
		bnx2fc_return_rqe(tgt, 1);

num_rq_zero:

	fps = &per_cpu(bnx2fc_percpu, cpu);
	spin_lock_bh(&fps->fp_work_lock);
	if (fps->iothread) {
		work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
					 num_rq, task);
		if (work) {
			list_add_tail(&work->list, &fps->work_list);
			wake_up_process(fps->iothread);
			spin_unlock_bh(&fps->fp_work_lock);
			return true;
		}
	}
	spin_unlock_bh(&fps->fp_work_lock);
	bnx2fc_process_cq_compl(tgt, wqe,
				rq_data_buff, num_rq, task);

	return true;
}

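/*
 * bnx2fc_process_new_cqes - drain the connection's CQ
 *
 * Walks CQEs until the toggle bit no longer matches, handing
 * unsolicited events to bnx2fc_process_unsol_compl() and everything
 * else to the per-CPU I/O threads via bnx2fc_pending_work().
 */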
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u32 num_cqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			if (bnx2fc_pending_work(tgt, wqe))
				num_free_sqes++;
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_cqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_cqes) {
		/* Arm CQ only if doorbell is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
	} else {
		/* FW offload request successfully completed */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	}
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mismatch\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
		goto enbl_cmpl_err;
	}
	if (!ofld_kcqe->completion_status)
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe() - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
			  u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	u32 i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
	tgt->ctx_base = ioremap(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}

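/*
 * bnx2fc_init_seq_cleanup_task - build a sequence-cleanup task context
 * (used for tape sequence level error recovery); the SGL cursor is
 * rewound to the BD that contains the relative offset being retried.
 */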
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;
	}

	memset(&task->rxwr_only.rx_seq_ctx, 0,
	       sizeof(struct fcoe_rx_seq_ctx));
	task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
	task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
}

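/*
 * bnx2fc_init_cleanup_task - build an exchange-cleanup task context that
 * tells the firmware to release the resources of the task in orig_xid.
 */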
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

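/*
 * bnx2fc_init_mp_task - build a middle-path task context (TM, ELS or
 * ABTS); request and response buffers come from the io_req's mp_req.
 */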
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}

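/*
 * bnx2fc_init_task - build the task context for a regular SCSI command.
 * Single-BD disk I/O uses the cached SGE fast path; everything else
 * points the firmware at the BD table.
 */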
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	struct fcp_cmnd *fcp_cmnd;
	u64 *raw_fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	bd_count = bd_tbl->bd_valid;
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					cached_sge->cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					cached_sge->cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					cached_sge->cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_count;
		}
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (dev_type == TYPE_TAPE) {
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
		io_req->rec_retry = 0;
	} else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd;
	bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd);
	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
	raw_fcp_cmnd = (u64 *)
		       task->txwr_rxrd.union_ctx.fcp_cmd.opaque;

	/* swab fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		raw_fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* rx flags */
	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;

	/* Rx Only */
	if (task_type != FCOE_TASK_TYPE_READ)
		return;

	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;

	if (dev_type == TYPE_DISK) {
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
						fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
						fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {

			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	} else {
		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
	}
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int task_ctx_arr_sz;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
	hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
				GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < task_ctx_arr_sz; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < task_ctx_arr_sz; i++) {
		if (hba->task_ctx[i]) {

			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int task_ctx_arr_sz;
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->task_ctx_bd_tbl,
				    hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
	if (hba->task_ctx) {
		for (i = 0; i < task_ctx_arr_sz; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						    hba->task_ctx[i],
						    hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	u32 *pbl;

	if (hba->hash_tbl_segments) {

		pbl = hba->hash_tbl_pbl;
		if (pbl) {
			segment_count = hba->hash_tbl_segment_count;
			for (i = 0; i < segment_count; ++i) {
				dma_addr_t dma_address;

				dma_address = le32_to_cpu(*pbl);
				++pbl;
				dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
				++pbl;
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_address);
			}
		}

		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->hash_tbl_pbl,
				    hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

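/*
 * bnx2fc_allocate_hash_table - allocate the session hash table in
 * BNX2FC_HASH_TBL_CHUNK_SIZE segments and publish their DMA addresses
 * through a page-sized PBL that is handed to the firmware in the INIT2
 * KWQE.
 */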
static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		goto cleanup_ht;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
							BNX2FC_HASH_TBL_CHUNK_SIZE,
							&dma_segment_array[i],
							GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			goto cleanup_dma;
		}
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		goto cleanup_dma;
	}

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	pbl = hba->hash_tbl_pbl;
	i = 0;
	while (*pbl && *(pbl + 1)) {
		++pbl;
		++pbl;
		++i;
	}
	kfree(dma_segment_array);
	return 0;

cleanup_dma:
	for (i = 0; i < segment_count; ++i) {
		if (hba->hash_tbl_segments[i])
			dma_free_coherent(&hba->pcidev->dev,
					    BNX2FC_HASH_TBL_CHUNK_SIZE,
					    hba->hash_tbl_segments[i],
					    dma_segment_array[i]);
	}

	kfree(dma_segment_array);

cleanup_ht:
	kfree(hba->hash_tbl_segments);
	hba->hash_tbl_segments = NULL;
	return -ENOMEM;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl_ptr,
				    hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}
);