/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *conn_destroy);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
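
/*
 * For reference (hypothetical value, not from the driver): every KWQE in
 * this file carries 64-bit DMA addresses as two u32 halves.  With a
 * dma_addr_t of 0x0000000123456789 the split used above works out to:
 *
 *	lo = (u32) dma;			  -> 0x23456789
 *	hi = (u32) ((u64) dma >> 32);	  -> 0x00000001
 */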

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba:	adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 * with the firmware.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
				(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_hba *hba = port->priv;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/* vlan flag */
	ofld_req3.flags |= (hba->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
	ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
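
/*
 * For reference (hypothetical value): the FC_ID fields above are packed
 * least-significant byte first.  With port_id = 0x01AB23 the assignments
 * yield s_id[0] = 0x23, s_id[1] = 0xAB and s_id[2] = 0x01, i.e. the 24-bit
 * fabric address split into its three bytes for the firmware.
 */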

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
	enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = hba->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port:	port structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_hba *hba = port->priv;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
	disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
	disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
	disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
	disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
	disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];

	disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
	disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
	disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
	disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
	disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
	disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = hba->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = hba->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba:	adapter structure pointer
 * @tgt:	bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	int xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_hba *hba = tgt->port->priv;
	int task_idx, index;
	int rc = 0;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
						hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req) {
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			spin_unlock_bh(&tgt->tgt_lock);
			break;
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &io_req->req_flags)) {
			/*
			 * Cancel the timeout_work, as we received IO
			 * completion with FW error.
			 */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */

			rc = bnx2fc_initiate_abts(io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
					"failed. issue cleanup\n");
				rc = bnx2fc_initiate_cleanup(io_req);
			}
		} else
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
			err_entry->err_warn_bitmap_hi,
			err_entry->err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
			err_entry->tx_buf_off, err_entry->rx_buf_off);

		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rx_wr_tx_rd.rx_flags &
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	/* optimized completion path */
	if (cmd_type == BNX2FC_SCSI_CMD) {
		rx_state = ((task->rx_wr_tx_rd.rx_flags &
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);

		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}
	}

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
		bnx2fc_process_els_compl(io_req, task, num_rq);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;

	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u16 wqe;
	bool more_cqes_found = false;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	do {
		more_cqes_found ^= true;

		while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
		       (tgt->cq_curr_toggle_bit <<
		       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

			/* new entry on the cq */
			if (wqe & FCOE_CQE_CQE_TYPE) {
				/* Unsolicited event notification */
				bnx2fc_process_unsol_compl(tgt, wqe);
			} else {
				struct bnx2fc_work *work = NULL;
				struct bnx2fc_percpu_s *fps = NULL;
				unsigned int cpu = wqe % num_possible_cpus();

				fps = &per_cpu(bnx2fc_percpu, cpu);
				spin_lock_bh(&fps->fp_work_lock);
				if (unlikely(!fps->iothread))
					goto unlock;

				work = bnx2fc_alloc_work(tgt, wqe);
				if (work)
					list_add_tail(&work->list,
						      &fps->work_list);
unlock:
				spin_unlock_bh(&fps->fp_work_lock);

				/* Pending work request completion */
				if (fps->iothread && work)
					wake_up_process(fps->iothread);
				else
					bnx2fc_process_cq_compl(tgt, wqe);
			}
			cqe++;
			tgt->cq_cons_idx++;

			if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
				tgt->cq_cons_idx = 0;
				cqe = cq;
				tgt->cq_curr_toggle_bit =
					1 - tgt->cq_curr_toggle_bit;
			}
		}
		/* Re-arm CQ */
		if (more_cqes_found) {
			tgt->conn_db->cq_arm.lo = -1;
			wmb();
		}
	} while (more_cqes_found);

	/*
	 * Commit tgt->cq_cons_idx change to the memory
	 * spin_lock implies full memory barrier, no need to smp_wmb
	 */

	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}
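
/*
 * Note on the toggle bit (illustrative, values hypothetical): the consumer
 * owns a CQE only while its FCOE_CQE_TOGGLE_BIT matches
 * tgt->cq_curr_toggle_bit.  The firmware writes one lap of entries with the
 * bit set to one value; once the consumer wraps past BNX2FC_CQ_WQES_MAX it
 * flips its expected value, so stale entries from the previous lap no longer
 * compare equal and the loop above stops at the true producer position
 * without needing a separate producer index.
 */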

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport	*tgt;
	struct fcoe_port	*port;
	u32			conn_id;
	u32			context_id;
	int			rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
				FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {
		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ALERT PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;
	u32			context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ALERT PFX "context id mis-match\n");
		return;
	}
	if (hba != tgt->port->priv) {
		printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status) {
		goto enbl_cmpl_err;
	} else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport	*tgt;
	u32			conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;

	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	array of kcqe pointers
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	u32 i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
								kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}
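
/*
 * Worked example (hypothetical sizes): with BNX2FC_SQ_WQES_MAX of 256 and
 * sq_prod_idx at 255, posting one more WQE wraps the producer back to 0 and
 * flips sq_curr_toggle_bit, so the firmware can tell fresh entries on the
 * new lap apart from WQEs left over from the previous lap.
 */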

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod ev_doorbell;
	u32 msg;

	wmb();

	memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
	ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;

	ev_doorbell.prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
					B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	msg = *((u32 *)&ev_doorbell);
	writel(cpu_to_le32(msg), tgt->ctx_base);

	mmiowb();
}
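
/*
 * For reference (hypothetical values): the doorbell collapses the whole
 * b577xx_doorbell_set_prod structure into one 32-bit MMIO write.  With
 * sq_prod_idx == 0x0012 and sq_curr_toggle_bit == 1, ev_doorbell.prod is
 * 0x8012 (toggle bit in bit 15), and that value plus the header byte reach
 * the chip in the single writel() above.
 */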

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_hba *hba = port->priv;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}
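
/*
 * Worked example (values hypothetical): each offloaded connection gets its
 * own doorbell page inside the doorbell BAR.  With a doorbell page size of
 * 0x80 and context_id == 0x1234, the mapping above lands at
 * reg_base + 0x80 * 0x1234 + DPM_TRIGER_TYPE, and only 4 bytes are remapped
 * because bnx2fc_ring_doorbell() issues a single u32 write.
 */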

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
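
/*
 * Worked example (sizes hypothetical): the RQ producer keeps a lap counter
 * in bit 15 while the low bits index the ring.  With a BNX2FC_RQ_WQES_MAX of
 * 16 and rq_prod_idx == 0x000F, returning one entry gives next_prod_idx ==
 * 0x0010; the wrap adds 0x8000 - 16, so the new producer value is 0x8000,
 * i.e. index 0 with the lap bit toggled for the firmware.
 */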

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
	/* Common */
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.general.cleanup_info.task_id = orig_xid;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
				struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
		BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
			      (unsigned long long)mp_req->mp_req_bd_dma);
	}

	/* Tx Write Rx Read */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Rx Write Tx Read */
	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rx_wr_tx_rd.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {

		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)mp_req->mp_resp_bd_dma;
		task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
	}
}

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
				(u32)bd_tbl->bd_tbl_dma;
		task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
				bd_tbl->bd_valid;
	}

	/*Tx Write Rx Read */
	/* Init state to NORMAL */
	task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
	task->tx_wr_rx_rd.init_flags = task_type <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
	task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;

	/* Common */
	task->cmn.data_2_trns = io_req->data_xfer_len;
	context_id = tgt->context_id;
	task->cmn.common_flags = context_id <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
	task->cmn.common_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
	task->cmn.common_flags |= 1 <<
			FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;

	/* Set initiative ownership */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;

	/* Set initial seq counter */
	task->cmn.tx_low_seq_cnt = 1;

	/* Set state to "waiting for the first packet" */
	task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		    task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rx_wr_tx_rd.rx_id = 0xffff;

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_READ) {

		bd_count = bd_tbl->bd_valid;
		if (bd_count == 1) {

			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
					fcoe_bd_tbl->buf_len;
			task->tx_wr_rx_rd.init_flags |= 1 <<
				FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
		} else {

			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
					bd_tbl->bd_valid;
		}
	}
}
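
/*
 * For reference (illustrative): the FCP_CMND IU is copied into the task
 * context as big-endian 64-bit words.  Since sizeof(struct fcp_cmnd) is 32
 * bytes, cnt above works out to 4, and on a little-endian host each
 * cpu_to_be64() swaps the byte order of one 8-byte chunk before it is placed
 * in task->cmn.general.cmd_info.fcp_cmd_payload.opaque.
 */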

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -1;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -1;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -1;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -1;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {

			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}
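
/*
 * For reference (sizes hypothetical): each page allocated in the loop above
 * holds PAGE_SIZE / sizeof(struct fcoe_task_ctx_entry) task contexts -- e.g.
 * 4096 / 128 = 32, which is where the "32 task context entries" figure in
 * the sizing comment comes from.  That is also why the completion paths
 * locate a task with task_idx = xid / BNX2FC_TASKS_PER_PAGE and
 * index = xid % BNX2FC_TASKS_PER_PAGE.
 */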

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->task_ctx_bd_tbl,
				    hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						    hba->task_ctx[i],
						    hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	int hash_table_size;
	u32 *pbl;

	segment_count = hba->hash_tbl_segment_count;
	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		dma_addr_t dma_address;

		dma_address = le32_to_cpu(*pbl);
		++pbl;
		dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
		++pbl;
		dma_free_coherent(&hba->pcidev->dev,
				  BNX2FC_HASH_TBL_CHUNK_SIZE,
				  hba->hash_tbl_segments[i],
				  dma_address);
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->hash_tbl_pbl,
				    hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			while (--i >= 0) {
				dma_free_coherent(&hba->pcidev->dev,
						    BNX2FC_HASH_TBL_CHUNK_SIZE,
						    hba->hash_tbl_segments[i],
						    dma_segment_array[i]);
				hba->hash_tbl_segments[i] = NULL;
			}
			kfree(dma_segment_array);
			return -ENOMEM;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		kfree(dma_segment_array);
		return -ENOMEM;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	pbl = hba->hash_tbl_pbl;
	while (*pbl && *(pbl + 1)) {
		u32 lo;
		u32 hi;
		lo = *pbl;
		++pbl;
		hi = *pbl;
		++pbl;
	}
	kfree(dma_segment_array);
	return 0;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba:	Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
				sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (unsigned long) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl_ptr,
				    hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				    hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}