/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
 *
 * Copyright (c) 2006 - 2009 Broadcom Corporation
 * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
 */
#include <linux/gfp.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"
/**
 * bnx2i_get_cid_num - get cid from ep
 * @ep: endpoint pointer
 *
 * Only applicable to 57710 family of devices
 */
static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
{
	u32 cid;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		cid = ep->ep_cid;
	else
		cid = GET_CID_NUM(ep->ep_cid);
	return cid;
}
/**
 * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type
 * @hba: Adapter for which adjustments are to be made
 *
 * Only applicable to 57710 family of devices
 */
static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
{
	u32 num_elements_per_pg;

	if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
	    test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		if (!is_power_of_2(hba->max_sqes))
			hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);

		if (!is_power_of_2(hba->max_rqes))
			hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
	}

	/* Adjust each queue size if the user selection does not
	 * yield integral num of page buffers
	 */
	/* adjust SQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	if (hba->max_sqes < num_elements_per_pg)
		hba->max_sqes = num_elements_per_pg;
	else if (hba->max_sqes % num_elements_per_pg)
		hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);

	/* adjust CQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
	if (hba->max_cqes < num_elements_per_pg)
		hba->max_cqes = num_elements_per_pg;
	else if (hba->max_cqes % num_elements_per_pg)
		hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);

	/* adjust RQ */
	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
	if (hba->max_rqes < num_elements_per_pg)
		hba->max_rqes = num_elements_per_pg;
	else if (hba->max_rqes % num_elements_per_pg)
		hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
				~(num_elements_per_pg - 1);
}
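/*
 * Worked example of the rounding above (sizes are illustrative; the real
 * values come from the BNX2I_*_SIZE constants): with 4K pages and a
 * 64-byte SQ WQE there are 64 WQEs per page, so a user request for 100
 * SQ entries is rounded up to 128 and the queue always occupies an
 * integral number of pages.
 */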
/**
 * bnx2i_get_link_state - get network interface link state
 * @hba: adapter instance pointer
 *
 * updates adapter structure flag based on netdev state
 */
static void bnx2i_get_link_state(struct bnx2i_hba *hba)
{
	if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
	else
		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
}
/**
 * bnx2i_iscsi_license_error - displays iscsi license related error message
 * @hba: adapter instance pointer
 * @error_code: error classification
 *
 * Puts out an error log when the driver is unable to offload an iscsi
 * connection due to license restrictions
 */
static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
{
	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
		/* iSCSI offload not supported on this device */
		printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
		       hba->netdev->name);
	if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
		/* iSCSI offload not supported on this LOM device */
		printk(KERN_ERR "bnx2i: LOM is not enabled to "
		       "offload iSCSI connections, dev=%s\n",
		       hba->netdev->name);
	set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
}
/**
 * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
 * @ep: endpoint (transport identifier) structure
 * @action: action, ARM or DISARM. For now only ARM_CQE is used
 *
 * Arming the CQ enables the chip to generate global EQ events in order to
 * interrupt the driver. An EQ event is generated when the CQ index is hit,
 * or when at least one CQ entry is outstanding and the on-chip timer expires.
 */
void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
	struct bnx2i_5771x_cq_db *cq_db;
	u16 cq_index;
	u16 next_index;
	u32 num_active_cmds;

	/* Coalesce CQ entries only on 10G devices */
	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		return;

	/* Do not update CQ DB multiple times before firmware writes
	 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
	 * interrupts and other unwanted results
	 */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
	if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
		return;

	if (action == CNIC_ARM_CQE) {
		num_active_cmds = ep->num_active_cmds;
		if (num_active_cmds <= event_coal_min)
			next_index = 1;
		else
			next_index = event_coal_min +
				(num_active_cmds - event_coal_min) /
				event_coal_div;
		if (!next_index)
			next_index = 1;
		cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
		if (cq_index > ep->qp.cqe_size * 2)
			cq_index -= ep->qp.cqe_size * 2;
		if (!cq_index)
			cq_index = 1;

		cq_db->sqn[0] = cq_index;
	}
}
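/*
 * Example of the coalescing arithmetic above (parameter values are
 * illustrative): with event_coal_min = 24 and event_coal_div = 2, a
 * connection with 64 active commands arms the CQ 24 + (64 - 24) / 2 = 44
 * entries ahead of the current expected sequence number, so the chip
 * raises one EQ event for a burst of completions instead of one per CQE.
 */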
/**
 * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
 * @conn: iscsi connection on which RQ event occurred
 * @ptr: driver buffer to which RQ buffer contents are to be copied
 * @len: length of valid data inside RQ buf
 *
 * Copies RQ buffer contents from the shared (DMA'able) memory region to
 * the driver buffer. The RQ is used to DMA unsolicited iscsi pdu's and
 * scsi sense data.
 */
void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
{
	if (!bnx2i_conn->ep->qp.rqe_left)
		return;

	bnx2i_conn->ep->qp.rqe_left--;
	memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
	if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
		bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
		bnx2i_conn->ep->qp.rq_cons_idx = 0;
	} else {
		bnx2i_conn->ep->qp.rq_cons_qe++;
		bnx2i_conn->ep->qp.rq_cons_idx++;
	}
}
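/*
 * Callers always pair this copy with a buffer replenish, e.g. the sense
 * data path in bnx2i_process_scsi_cmd_resp() later in this file:
 *	bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
 *	bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
 */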
static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
{
	struct bnx2i_5771x_dbell dbell;
	u32 msg;

	memset(&dbell, 0, sizeof(dbell));
	dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
	msg = *((u32 *)&dbell);
	/* TODO : get doorbell register mapping */
	writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
}
/**
 * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell
 * @conn: iscsi connection on which event to post
 * @count: number of RQ buffers being posted to chip
 *
 * No need to ring hardware doorbell for 57710 family of devices
 */
void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *rq_db;
	u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	ep->qp.rqe_left += count;
	ep->qp.rq_prod_idx &= 0x7FFF;
	ep->qp.rq_prod_idx += count;

	if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
		ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
		if (!hi_bit)
			ep->qp.rq_prod_idx |= 0x8000;
	} else
		ep->qp.rq_prod_idx |= hi_bit;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
		rq_db->prod_idx = ep->qp.rq_prod_idx;
		/* no need to ring hardware doorbell for 57710 */
	} else {
		writew(ep->qp.rq_prod_idx,
		       ep->qp.ctx_base + CNIC_RECV_DOORBELL);
	}
}
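/*
 * Note on the index handling above: rq_prod_idx keeps the producer index
 * in the low 15 bits and uses bit 15 as a phase/wrap marker. The index is
 * stripped, advanced by 'count', wrapped modulo max_rqes on overflow, and
 * the phase bit is flipped on wrap (or preserved via 'hi_bit' otherwise),
 * which lets the chip distinguish a full ring from an empty one.
 */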
/**
 * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
 * @conn: iscsi connection to which new SQ entries belong
 * @count: number of SQ WQEs to post
 *
 * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
 * of devices. For 5706/5708/5709 the new SQ WQE count is written into the
 * doorbell register.
 */
static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
{
	struct bnx2i_5771x_sq_rq_db *sq_db;
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;

	ep->num_active_cmds++;
	wmb();	/* flush SQ WQE memory before the doorbell is rung */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
		sq_db->prod_idx = ep->qp.sq_prod_idx;
		bnx2i_ring_577xx_doorbell(bnx2i_conn);
	} else
		writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);

	mmiowb();	/* flush posted PCI writes */
}
/**
 * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
 * @conn: iscsi connection to which new SQ entries belong
 * @count: number of SQ WQEs to post
 *
 * this routine will update SQ driver parameters and ring the doorbell
 */
static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
					      int count)
{
	int tmp_cnt;

	if (count == 1) {
		if (bnx2i_conn->ep->qp.sq_prod_qe ==
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe =
						bnx2i_conn->ep->qp.sq_first_qe;
		else
			bnx2i_conn->ep->qp.sq_prod_qe++;
	} else {
		if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
		    bnx2i_conn->ep->qp.sq_last_qe)
			bnx2i_conn->ep->qp.sq_prod_qe += count;
		else {
			tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
				  bnx2i_conn->ep->qp.sq_prod_qe;
			bnx2i_conn->ep->qp.sq_prod_qe =
				&bnx2i_conn->ep->qp.sq_first_qe[count -
								(tmp_cnt + 1)];
		}
	}
	bnx2i_conn->ep->qp.sq_prod_idx += count;
	/* Ring the doorbell */
	bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
}
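/*
 * Every bnx2i_send_iscsi_*() routine below builds exactly one SQ WQE in
 * place at sq_prod_qe and then calls
 * bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1) to advance the producer
 * and ring the doorbell; the multi-WQE branch above is kept for callers
 * that post more than one WQE at a time.
 */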
/**
 * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
 * @conn: iscsi connection
 * @cmd: driver command structure which is requesting
 *	 a WQE to be sent to chip for further processing
 *
 * prepare and post an iSCSI Login request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
			   struct iscsi_task *task)
{
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_login_request *login_wqe;
	struct iscsi_login *login_hdr;
	u32 dword;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	login_hdr = (struct iscsi_login *)task->hdr;
	login_wqe = (struct bnx2i_login_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;

	login_wqe->op_code = login_hdr->opcode;
	login_wqe->op_attr = login_hdr->flags;
	login_wqe->version_max = login_hdr->max_version;
	login_wqe->version_min = login_hdr->min_version;
	login_wqe->data_length = ntoh24(login_hdr->dlength);
	login_wqe->isid_lo = *((u32 *) login_hdr->isid);
	login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
	login_wqe->tsih = login_hdr->tsih;
	login_wqe->itt = task->itt |
		(ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
	login_wqe->cid = login_hdr->cid;

	login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
	login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
	login_wqe->flags = ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN;

	login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
	login_wqe->resp_bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);

	dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
		 (bnx2i_conn->gen_pdu.resp_buf_size <<
		  ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
	login_wqe->resp_buffer = dword;
	login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
	login_wqe->bd_list_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
	login_wqe->num_bds = 1;
	login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
 * @conn: iscsi connection
 * @mtask: driver command structure which is requesting
 *	   a WQE to be sent to chip for further processing
 *
 * prepare and post an iSCSI task management request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
			 struct iscsi_task *mtask)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_tm *tmfabort_hdr;
	struct scsi_cmnd *ref_sc;
	struct iscsi_task *ctask;
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_tmf_request *tmfabort_wqe;
	u32 dword;
	u32 scsi_lun[2];

	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
	tmfabort_wqe = (struct bnx2i_tmf_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;

	tmfabort_wqe->op_code = tmfabort_hdr->opcode;
	tmfabort_wqe->op_attr = 0;
	tmfabort_wqe->op_attr =
		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;

	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
	tmfabort_wqe->reserved2 = 0;
	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);

	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
	if (!ctask || !ctask->sc)
		/*
		 * the iscsi layer must have completed the cmd while this
		 * was starting up.
		 *
		 * Note: In the case of a SCSI cmd timeout, the task's sc
		 *       is still active; hence ctask->sc != 0
		 *       In this case, the task must be aborted
		 */
		return 0;

	ref_sc = ctask->sc;

	/* Retrieve LUN directly from the ref_sc */
	int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);

	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	else
		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);

	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	tmfabort_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	tmfabort_wqe->num_bds = 1;
	tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
 * @conn: iscsi connection
 * @cmd: driver command structure which is requesting
 *	 a WQE to be sent to chip for further processing
 *
 * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
			     struct bnx2i_cmd *cmd)
{
	struct bnx2i_cmd_request *scsi_cmd_wqe;

	scsi_cmd_wqe = (struct bnx2i_cmd_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;
	memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
	scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
 * @conn: iscsi connection
 * @cmd: driver command structure which is requesting
 *	 a WQE to be sent to chip for further processing
 * @ttt: TTT to be used when building pdu header
 * @datap: payload buffer pointer
 * @data_len: payload data length
 * @unsol: indicates whether nopout pdu is unsolicited pdu or
 *	   in response to target's NOPIN w/ TTT != FFFFFFFF
 *
 * prepare and post a nopout request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task, u32 ttt,
			    char *datap, int data_len, int unsol)
{
	struct bnx2i_endpoint *ep = bnx2i_conn->ep;
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_nop_out_request *nopout_wqe;
	struct iscsi_nopout *nopout_hdr;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	nopout_hdr = (struct iscsi_nopout *)task->hdr;
	nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
	nopout_wqe->op_code = nopout_hdr->opcode;
	nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
	memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		u32 tmp = nopout_hdr->lun[0];
		/* 57710 requires LUN field to be swapped */
		nopout_hdr->lun[0] = nopout_hdr->lun[1];
		nopout_hdr->lun[1] = tmp;
	}

	nopout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_TMF_REQUEST_TYPE_SHIFT));
	nopout_wqe->ttt = ttt;
	nopout_wqe->flags = 0;
	if (!unsol)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
	else if (nopout_hdr->itt == RESERVED_ITT)
		nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;

	nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
	nopout_wqe->data_length = data_len;
	if (data_len) {
		/* handle payload data, not required in first release */
		printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
	} else {
		nopout_wqe->bd_list_addr_lo = (u32)
					bnx2i_conn->hba->mp_bd_dma;
		nopout_wqe->bd_list_addr_hi =
			(u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
		nopout_wqe->num_bds = 1;
	}
	nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
 * @conn: iscsi connection
 * @cmd: driver command structure which is requesting
 *	 a WQE to be sent to chip for further processing
 *
 * prepare and post logout request WQE to CNIC firmware
 */
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
			    struct iscsi_task *task)
{
	struct bnx2i_cmd *bnx2i_cmd;
	struct bnx2i_logout_request *logout_wqe;
	struct iscsi_logout *logout_hdr;

	bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
	logout_hdr = (struct iscsi_logout *)task->hdr;

	logout_wqe = (struct bnx2i_logout_request *)
						bnx2i_conn->ep->qp.sq_prod_qe;
	memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));

	logout_wqe->op_code = logout_hdr->opcode;
	logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
	logout_wqe->op_attr =
			logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
	logout_wqe->itt = ((u16)task->itt |
			   (ISCSI_TASK_TYPE_MPATH <<
			    ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
	logout_wqe->data_length = 0;

	logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
	logout_wqe->bd_list_addr_hi = (u32)
				((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
	logout_wqe->num_bds = 1;
	logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
	return 0;
}
/**
 * bnx2i_update_iscsi_conn - post iSCSI Connection update KWQE to hardware
 * @conn: iscsi connection which requires iscsi parameter update
 *
 * sends down iSCSI Conn Update request to move iSCSI conn to FFP
 */
void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_update *update_wqe;
	struct iscsi_kwqe_conn_update conn_update_kwqe;

	update_wqe = &conn_update_kwqe;

	update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
	update_wqe->hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	/* 5771x requires conn context id to be passed as is */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
		update_wqe->context_id = bnx2i_conn->ep->ep_cid;
	else
		update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
	update_wqe->conn_flags = 0;
	if (conn->hdrdgst_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
	if (conn->datadgst_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
	if (conn->session->initial_r2t_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
	if (conn->session->imm_data_en)
		update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;

	update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
	update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
	update_wqe->first_burst_length = conn->session->first_burst;
	update_wqe->max_burst_length = conn->session->max_burst;
	update_wqe->exp_stat_sn = conn->exp_statsn;
	update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
	update_wqe->session_error_recovery_level = conn->session->erl;
	iscsi_conn_printk(KERN_ALERT, conn,
			  "bnx2i: conn update - MBL 0x%x FBL 0x%x "
			  "MRDSL_I 0x%x MRDSL_T 0x%x\n",
			  update_wqe->max_burst_length,
			  update_wqe->first_burst_length,
			  update_wqe->max_recv_pdu_length,
			  update_wqe->max_send_pdu_length);

	kwqe_arr[0] = (struct kwqe *) update_wqe;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
}
/**
 * bnx2i_ep_ofld_timer - connection offload/destroy request timeout handler
 * @data: endpoint (transport handle) structure pointer
 *
 * routine to handle connection offload/destroy request timeout
 */
void bnx2i_ep_ofld_timer(unsigned long data)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;

	if (ep->state == EP_STATE_OFLD_START) {
		printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
		ep->state = EP_STATE_OFLD_FAILED;
	} else if (ep->state == EP_STATE_DISCONN_START) {
		printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
		ep->state = EP_STATE_DISCONN_TIMEDOUT;
	} else if (ep->state == EP_STATE_CLEANUP_START) {
		printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
		ep->state = EP_STATE_CLEANUP_FAILED;
	}

	wake_up_interruptible(&ep->ofld_wait);
}
static int bnx2i_power_of2(u32 val)
{
	u32 power = 0;

	if (val & (val - 1))
		return power;
	val--;
	while (val) {
		val = val >> 1;
		power++;
	}
	return power;
}
/**
 * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
 * @hba: adapter structure pointer
 * @cmd: driver command structure which is requesting
 *	 a WQE to be sent to chip for further processing
 *
 * prepares and posts an iSCSI command cleanup request SQ WQE
 */
void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct bnx2i_cleanup_request *cmd_cleanup;

	cmd_cleanup =
		(struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
	memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));

	cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
	cmd_cleanup->itt = cmd->req.itt;
	cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */

	bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
}
/**
 * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * this routine prepares and posts a CONN_DESTROY KWQE to initiate the
 * iscsi connection context clean-up process
 */
void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_destroy conn_cleanup;

	memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));

	conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
	conn_cleanup.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	/* 5771x requires conn context id to be passed as is */
	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		conn_cleanup.context_id = ep->ep_cid;
	else
		conn_cleanup.context_id = (ep->ep_cid >> 7);

	conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;

	kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
}
/**
 * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
					  struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[2];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	dma_addr = ep->qp.sq_pgtbl_phys;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ptbl = (u32 *) ep->qp.sq_pgtbl_virt;

	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	ofld_req2.num_additional_wqes = 0;

	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}
/**
 * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
					   struct bnx2i_endpoint *ep)
{
	struct kwqe *kwqe_arr[5];
	struct iscsi_kwqe_conn_offload1 ofld_req1;
	struct iscsi_kwqe_conn_offload2 ofld_req2;
	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
	dma_addr_t dma_addr;
	int num_kwqes = 2;
	u32 *ptbl;

	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;

	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);

	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	ofld_req2.sq_first_pte.hi = *ptbl++;
	ofld_req2.sq_first_pte.lo = *ptbl;

	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	ofld_req2.cq_first_pte.hi = *ptbl++;
	ofld_req2.cq_first_pte.lo = *ptbl;

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;

	ofld_req2.num_additional_wqes = 1;
	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
	ofld_req3[0].qp_first_pte[0].lo = *ptbl;

	kwqe_arr[2] = (struct kwqe *) ofld_req3;
	/* need if we decide to go with multiple KCQE's per conn */
	num_kwqes += 1;

	if (hba->cnic && hba->cnic->submit_kwqes)
		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
}
/**
 * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
 */
void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		bnx2i_5771x_send_conn_ofld_req(hba, ep);
	else
		bnx2i_570x_send_conn_ofld_req(hba, ep);
}
/**
 * setup_qp_page_tables - iscsi QP page table setup function
 * @ep: endpoint (transport identifier) structure
 *
 * Sets up page tables for SQ/RQ/CQ. The 1G (5706/5708/5709) devices require
 * the 64-bit address in big endian format, whereas the 10G (57710) devices
 * require the page table in little endian format.
 */
static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
{
	int num_pages;
	u32 *ptbl;
	dma_addr_t page;
	int cnic_dev_10g;

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
		cnic_dev_10g = 1;
	else
		cnic_dev_10g = 0;

	/* SQ page table */
	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
	page = ep->qp.sq_phys;

	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
	while (num_pages--) {
		if (cnic_dev_10g) {
			/* PTE is written in little endian format for 57710 */
			*ptbl = (u32) page;
			ptbl++;
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			page += PAGE_SIZE;
		} else {
			/* PTE is written in big endian format for
			 * 5706/5708/5709 devices */
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			*ptbl = (u32) page;
			ptbl++;
			page += PAGE_SIZE;
		}
	}

	/* RQ page table */
	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
	page = ep->qp.rq_phys;

	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
	while (num_pages--) {
		if (cnic_dev_10g) {
			/* PTE is written in little endian format for 57710 */
			*ptbl = (u32) page;
			ptbl++;
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			page += PAGE_SIZE;
		} else {
			/* PTE is written in big endian format for
			 * 5706/5708/5709 devices */
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			*ptbl = (u32) page;
			ptbl++;
			page += PAGE_SIZE;
		}
	}

	/* CQ page table */
	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
	page = ep->qp.cq_phys;

	if (cnic_dev_10g)
		ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
	else
		ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
	while (num_pages--) {
		if (cnic_dev_10g) {
			/* PTE is written in little endian format for 57710 */
			*ptbl = (u32) page;
			ptbl++;
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			page += PAGE_SIZE;
		} else {
			/* PTE is written in big endian format for
			 * 5706/5708/5709 devices */
			*ptbl = (u32) ((u64) page >> 32);
			ptbl++;
			*ptbl = (u32) page;
			ptbl++;
			page += PAGE_SIZE;
		}
	}
}
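/*
 * Layout example for one page table entry written above (address is
 * illustrative): for a queue page at DMA address 0x0000000123456000, a
 * 57710 PTE stores the low word first (0x23456000 then 0x00000001),
 * while 5706/5708/5709 PTEs store the high word first (0x00000001 then
 * 0x23456000), matching the little/big endian expectations of the two
 * chip families.
 */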
/**
 * bnx2i_alloc_qp_resc - allocates required resources for QP.
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * Allocate QP (transport layer for iSCSI connection) resources, DMA'able
 * memory for SQ/RQ/CQ and page tables. EP structure elements such
 * as producer/consumer indexes/pointers, queue sizes and page table
 * contents are initialized here as well.
 */
int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	struct bnx2i_5771x_cq_db *cq_db;

	ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;

	/* Allocate page table memory for SQ which is page aligned */
	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
	ep->qp.sq_mem_size =
		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.sq_pgtbl_size =
		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.sq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
				   &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.sq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
		       ep->qp.sq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual SQ element */
	ep->qp.sq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
				   &ep->qp.sq_phys, GFP_KERNEL);
	if (!ep->qp.sq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
		       ep->qp.sq_mem_size);
		goto mem_alloc_err;
	}

	memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
	ep->qp.sq_first_qe = ep->qp.sq_virt;
	ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
	ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
	ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
	ep->qp.sq_prod_idx = 0;
	ep->qp.sq_cons_idx = 0;
	ep->qp.sqe_left = hba->max_sqes;

	/* Allocate page table memory for CQ which is page aligned */
	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
	ep->qp.cq_mem_size =
		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.cq_pgtbl_size =
		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.cq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
				   &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.cq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
		       ep->qp.cq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual CQ element */
	ep->qp.cq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
				   &ep->qp.cq_phys, GFP_KERNEL);
	if (!ep->qp.cq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
		       ep->qp.cq_mem_size);
		goto mem_alloc_err;
	}
	memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);

	ep->qp.cq_first_qe = ep->qp.cq_virt;
	ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
	ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
	ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
	ep->qp.cq_prod_idx = 0;
	ep->qp.cq_cons_idx = 0;
	ep->qp.cqe_left = hba->max_cqes;
	ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
	ep->qp.cqe_size = hba->max_cqes;

	/* Invalidate all EQ CQE index, req only for 57710 */
	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
	memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);

	/* Allocate page table memory for RQ which is page aligned */
	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
	ep->qp.rq_mem_size =
		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
	ep->qp.rq_pgtbl_size =
		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	ep->qp.rq_pgtbl_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
	if (!ep->qp.rq_pgtbl_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
		       ep->qp.rq_pgtbl_size);
		goto mem_alloc_err;
	}

	/* Allocate memory area for actual RQ element */
	ep->qp.rq_virt =
		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
				   &ep->qp.rq_phys, GFP_KERNEL);
	if (!ep->qp.rq_virt) {
		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
		       ep->qp.rq_mem_size);
		goto mem_alloc_err;
	}

	ep->qp.rq_first_qe = ep->qp.rq_virt;
	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
	ep->qp.rq_prod_idx = 0x8000;
	ep->qp.rq_cons_idx = 0;
	ep->qp.rqe_left = hba->max_rqes;

	setup_qp_page_tables(ep);

	return 0;

mem_alloc_err:
	bnx2i_free_qp_resc(hba, ep);
	return -ENOMEM;
}
/**
 * bnx2i_free_qp_resc - free memory resources held by QP
 * @hba: adapter structure pointer
 * @ep: endpoint (transport identifier) structure
 *
 * Free QP resources - SQ/RQ/CQ memory and page tables.
 */
void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
{
	if (ep->qp.ctx_base) {
		iounmap(ep->qp.ctx_base);
		ep->qp.ctx_base = NULL;
	}
	/* Free SQ mem */
	if (ep->qp.sq_pgtbl_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
		ep->qp.sq_pgtbl_virt = NULL;
		ep->qp.sq_pgtbl_phys = 0;
	}
	if (ep->qp.sq_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
				  ep->qp.sq_virt, ep->qp.sq_phys);
		ep->qp.sq_virt = NULL;
		ep->qp.sq_phys = 0;
	}

	/* Free RQ mem */
	if (ep->qp.rq_pgtbl_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
		ep->qp.rq_pgtbl_virt = NULL;
		ep->qp.rq_pgtbl_phys = 0;
	}
	if (ep->qp.rq_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
				  ep->qp.rq_virt, ep->qp.rq_phys);
		ep->qp.rq_virt = NULL;
		ep->qp.rq_phys = 0;
	}

	/* Free CQ mem */
	if (ep->qp.cq_pgtbl_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
		ep->qp.cq_pgtbl_virt = NULL;
		ep->qp.cq_pgtbl_phys = 0;
	}
	if (ep->qp.cq_virt) {
		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
				  ep->qp.cq_virt, ep->qp.cq_phys);
		ep->qp.cq_virt = NULL;
		ep->qp.cq_phys = 0;
	}
}
/**
 * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
 * @hba: adapter structure pointer
 *
 * Send down iscsi_init KWQEs which initiate the initial handshake with the
 * firmware. This results in iSCSI support validation and on-chip context
 * manager initialization. Firmware completes this handshake with a CQE
 * carrying the result of iscsi support validation. Parameters carried by
 * the iscsi init request determine the number of offloaded connections and
 * the tolerance level for iscsi protocol violations this hba/chip can support.
 */
int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
{
	struct kwqe *kwqe_arr[3];
	struct iscsi_kwqe_init1 iscsi_init;
	struct iscsi_kwqe_init2 iscsi_init2;
	int rc = 0;
	u64 mask64;

	bnx2i_adjust_qp_size(hba);

	iscsi_init.flags =
		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
	if (en_tcp_dack)
		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
	iscsi_init.reserved0 = 0;
	iscsi_init.num_cqs = 1;
	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
	iscsi_init.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);

	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	iscsi_init.dummy_buffer_addr_hi =
		(u32) ((u64) hba->dummy_buf_dma >> 32);

	hba->ctx_ccell_tasks =
			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
	iscsi_init.num_ccells_per_conn = hba->num_ccell;
	iscsi_init.num_tasks_per_conn = hba->max_sqes;
	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
	iscsi_init.sq_num_wqes = hba->max_sqes;
	iscsi_init.cq_log_wqes_per_page =
		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
	iscsi_init.cq_num_wqes = hba->max_cqes;
	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
				   (PAGE_SIZE - 1)) / PAGE_SIZE;
	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
				   (PAGE_SIZE - 1)) / PAGE_SIZE;
	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
	iscsi_init.rq_num_wqes = hba->max_rqes;

	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
	iscsi_init2.hdr.flags =
		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
	mask64 = 0x0ULL;
	mask64 |= (
		(1UL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
		(1UL <<
		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
	if (error_mask1)
		iscsi_init2.error_bit_map[0] = error_mask1;
	else
		iscsi_init2.error_bit_map[0] = (u32) mask64;

	if (error_mask2)
		iscsi_init2.error_bit_map[1] = error_mask2;
	else
		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);

	iscsi_error_mask = mask64;

	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
	return rc;
}
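/*
 * error_bit_map[] selection above: the default 'mask64' downgrades a few
 * well-known protocol deviations (TTT not reserved, unexpected DataSN,
 * LUN mismatch) to warnings; error_mask1 and error_mask2 (assumed here to
 * be driver module parameters), when set, override the low and high 32
 * bits of that map respectively.
 */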
/**
 * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
 * @conn: iscsi connection
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process SCSI CMD Response CQE & complete the request to SCSI-ML
 */
static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
				       struct bnx2i_conn *bnx2i_conn,
				       struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_cmd_response *resp_cqe;
	struct bnx2i_cmd *bnx2i_cmd;
	struct iscsi_task *task;
	struct iscsi_cmd_rsp *hdr;
	u32 datalen = 0;

	resp_cqe = (struct bnx2i_cmd_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
	if (!task)
		goto fail;

	bnx2i_cmd = task->dd_data;

	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
		conn->datain_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_data_outs;
		conn->rxdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
	} else {
		conn->dataout_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_data_outs;
		conn->r2t_pdus_cnt +=
			resp_cqe->task_stat.read_stat.num_r2ts;
		conn->txdata_octets +=
			bnx2i_cmd->req.total_data_transfer_length;
	}
	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);

	hdr = (struct iscsi_cmd_rsp *)task->hdr;
	resp_cqe = (struct bnx2i_cmd_response *)cqe;
	hdr->opcode = resp_cqe->op_code;
	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
	hdr->response = resp_cqe->response;
	hdr->cmd_status = resp_cqe->status;
	hdr->flags = resp_cqe->response_flags;
	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);

	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
		goto done;

	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
		datalen = resp_cqe->data_length;
		if (datalen > BNX2I_RQ_WQE_SIZE) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > RQ sz\n",
					  datalen);
			datalen = BNX2I_RQ_WQE_SIZE;
		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
			iscsi_conn_printk(KERN_ERR, conn,
					  "sense data len %d > conn data\n",
					  datalen);
			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
		}

		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
	}

done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
fail:
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_login_resp - this function handles iscsi login response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process Login Response CQE & complete it to open-iscsi user daemon
 */
static int bnx2i_process_login_resp(struct iscsi_session *session,
				    struct bnx2i_conn *bnx2i_conn,
				    struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_login_response *login;
	struct iscsi_login_rsp *resp_hdr;
	int pld_len;
	int pad_len;

	login = (struct bnx2i_login_response *) cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
	if (!task)
		goto done;

	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = login->op_code;
	resp_hdr->flags = login->response_flags;
	resp_hdr->max_version = login->version_max;
	resp_hdr->active_version = login->version_active;
	resp_hdr->hlength = 0;

	hton24(resp_hdr->dlength, login->data_length);
	memcpy(resp_hdr->isid, &login->isid_lo, 6);
	resp_hdr->tsih = cpu_to_be16(login->tsih);
	resp_hdr->itt = task->hdr->itt;
	resp_hdr->statsn = cpu_to_be32(login->stat_sn);
	resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
	resp_hdr->status_class = login->status_class;
	resp_hdr->status_detail = login->status_detail;
	pld_len = login->data_length;
	bnx2i_conn->gen_pdu.resp_wr_ptr =
					bnx2i_conn->gen_pdu.resp_buf + pld_len;

	pad_len = 0;
	if (pld_len & 0x3)
		pad_len = 4 - (pld_len % 4);

	if (pad_len) {
		int i;

		for (i = 0; i < pad_len; i++) {
			bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
			bnx2i_conn->gen_pdu.resp_wr_ptr++;
		}
	}

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
		bnx2i_conn->gen_pdu.resp_buf,
		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
done:
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_tmf_resp - this function handles iscsi TMF response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI TMF Response CQE and wake up the driver eh thread.
 */
static int bnx2i_process_tmf_resp(struct iscsi_session *session,
				  struct bnx2i_conn *bnx2i_conn,
				  struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_tmf_response *tmf_cqe;
	struct iscsi_tm_rsp *resp_hdr;

	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
	if (!task)
		goto done;

	resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = tmf_cqe->op_code;
	resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
	resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
	resp_hdr->itt = task->hdr->itt;
	resp_hdr->response = tmf_cqe->response;

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_logout_resp - this function handles iscsi logout response
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI Logout Response CQE & make function call to
 * notify the user daemon.
 */
static int bnx2i_process_logout_resp(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_logout_response *logout;
	struct iscsi_logout_rsp *resp_hdr;

	logout = (struct bnx2i_logout_response *) cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
	if (!task)
		goto done;

	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = logout->op_code;
	resp_hdr->flags = logout->response;
	resp_hdr->hlength = 0;

	resp_hdr->itt = task->hdr->itt;
	resp_hdr->statsn = task->hdr->exp_statsn;
	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);

	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
done:
	spin_unlock(&session->lock);
	return 0;
}
/**
 * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI NOPIN local completion CQE, frees ITT and command structures
 */
static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_task *task;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
	if (task)
		iscsi_put_task(task);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
 * @conn: iscsi connection
 *
 * Firmware advances the RQ producer index for every unsolicited PDU even if
 * the payload data length is '0'. This function makes the corresponding
 * adjustments on the driver side to match this f/w behavior.
 */
static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
{
	char dummy_rq_data[2];

	bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
	bnx2i_put_rq_buf(bnx2i_conn, 1);
}
/**
 * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI target's proactive iSCSI NOPIN request
 */
static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
				    struct bnx2i_conn *bnx2i_conn,
				    struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;
	struct bnx2i_nop_in_msg *nop_in;
	struct iscsi_nopin *hdr;
	u32 itt;
	int tgt_async_nop = 0;

	nop_in = (struct bnx2i_nop_in_msg *)cqe;
	itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;

	spin_lock(&session->lock);
	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = nop_in->op_code;
	hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
	hdr->ttt = cpu_to_be32(nop_in->ttt);

	if (itt == (u16) RESERVED_ITT) {
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		goto done;
	}

	/* this is a response to one of our nop-outs */
	task = iscsi_itt_to_task(conn, itt);
	if (task) {
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = task->hdr->itt;
		hdr->ttt = cpu_to_be32(nop_in->ttt);
		memcpy(hdr->lun, nop_in->lun, 8);
	}
done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
	spin_unlock(&session->lock);

	return tgt_async_nop;
}
/**
 * bnx2i_process_async_mesg - this function handles iscsi async message
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI ASYNC Message
 */
static void bnx2i_process_async_mesg(struct iscsi_session *session,
				     struct bnx2i_conn *bnx2i_conn,
				     struct cqe *cqe)
{
	struct bnx2i_async_msg *async_cqe;
	struct iscsi_async *resp_hdr;
	u8 async_event;

	bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	async_cqe = (struct bnx2i_async_msg *)cqe;
	async_event = async_cqe->async_event;

	if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "async: scsi events not supported\n");
		return;
	}

	spin_lock(&session->lock);
	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = async_cqe->op_code;
	resp_hdr->flags = 0x80;

	memcpy(resp_hdr->lun, async_cqe->lun, 8);
	resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);

	resp_hdr->async_event = async_cqe->async_event;
	resp_hdr->async_vcode = async_cqe->async_vcode;

	resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
	resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
	resp_hdr->param3 = cpu_to_be16(async_cqe->param3);

	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
			     (struct iscsi_hdr *)resp_hdr, NULL, 0);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_process_reject_mesg - process iscsi reject pdu
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process iSCSI REJECT message
 */
static void bnx2i_process_reject_mesg(struct iscsi_session *session,
				      struct bnx2i_conn *bnx2i_conn,
				      struct cqe *cqe)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct bnx2i_reject_msg *reject;
	struct iscsi_reject *hdr;

	reject = (struct bnx2i_reject_msg *) cqe;
	if (reject->data_length) {
		bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
		bnx2i_put_rq_buf(bnx2i_conn, 1);
	} else
		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);

	spin_lock(&session->lock);
	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = reject->op_code;
	hdr->reason = reject->reason;
	hton24(hdr->dlength, reject->data_length);
	hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
			     reject->data_length);
	spin_unlock(&session->lock);
}
/**
 * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
 * @session: iscsi session pointer
 * @bnx2i_conn: iscsi connection pointer
 * @cqe: pointer to newly DMA'ed CQE entry for processing
 *
 * process command cleanup response CQE during conn shutdown or error recovery
 */
static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
					   struct bnx2i_conn *bnx2i_conn,
					   struct cqe *cqe)
{
	struct bnx2i_cleanup_response *cmd_clean_rsp;
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_task *task;

	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
	spin_lock(&session->lock);
	task = iscsi_itt_to_task(conn,
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	if (!task)
		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
	spin_unlock(&session->lock);
	complete(&bnx2i_conn->cmd_cleanup_cmpl);
}
/**
 * bnx2i_process_new_cqes - process newly DMA'ed CQE's
 * @bnx2i_conn: iscsi connection
 *
 * this function is called by the generic KCQ handler to process all
 * pending CQE's
 */
static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct qp_info *qp = &bnx2i_conn->ep->qp;
	struct bnx2i_nop_in_msg *nopin;
	int tgt_async_msg;

	while (1) {
		nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
		if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
			break;

		if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
			break;

		tgt_async_msg = 0;

		switch (nopin->op_code) {
		case ISCSI_OP_SCSI_CMD_RSP:
		case ISCSI_OP_SCSI_DATA_IN:
			bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
						    qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGIN_RSP:
			bnx2i_process_login_resp(session, bnx2i_conn,
						 qp->cq_cons_qe);
			break;
		case ISCSI_OP_SCSI_TMFUNC_RSP:
			bnx2i_process_tmf_resp(session, bnx2i_conn,
					       qp->cq_cons_qe);
			break;
		case ISCSI_OP_LOGOUT_RSP:
			bnx2i_process_logout_resp(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OP_NOOP_IN:
			if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
						     qp->cq_cons_qe))
				tgt_async_msg = 1;
			break;
		case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
			bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			bnx2i_process_async_mesg(session, bnx2i_conn,
						 qp->cq_cons_qe);
			tgt_async_msg = 1;
			break;
		case ISCSI_OP_REJECT:
			bnx2i_process_reject_mesg(session, bnx2i_conn,
						  qp->cq_cons_qe);
			break;
		case ISCSI_OPCODE_CLEANUP_RESPONSE:
			bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
						       qp->cq_cons_qe);
			break;
		default:
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
			       nopin->op_code);
		}

		if (!tgt_async_msg)
			bnx2i_conn->ep->num_active_cmds--;

		/* clear out in production version only, till beta keep opcode
		 * field intact, will be helpful in debugging (context dump)
		 * nopin->op_code = 0;
		 */
		qp->cqe_exp_seq_sn++;
		if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
			qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;

		if (qp->cq_cons_qe == qp->cq_last_qe) {
			qp->cq_cons_qe = qp->cq_first_qe;
			qp->cq_cons_idx = 0;
		} else {
			qp->cq_cons_qe++;
			qp->cq_cons_idx++;
		}
	}
	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
}
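/*
 * The loop above relies on cq_req_sn sequencing: firmware stamps each CQE
 * with a sequence number that cycles through 2 * cqe_size values (see the
 * cqe_exp_seq_sn handling), so a CQE whose cq_req_sn does not match the
 * expected value is simply a stale entry that has not been overwritten
 * yet, and processing stops there until new completions arrive.
 */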
/**
 * bnx2i_fastpath_notification - process global event queue (KCQ)
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry
 *
 * Fast path event notification handler, KCQ entry carries context id
 * of the connection that has 1 or more pending CQ entries
 */
static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
					struct iscsi_kcqe *new_cqe_kcqe)
{
	struct bnx2i_conn *conn;
	u32 iscsi_cid;

	iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!conn) {
		printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
		return;
	}
	if (!conn->ep) {
		printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
		return;
	}

	bnx2i_process_new_cqes(conn);
}
/**
 * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
 * @hba: adapter structure pointer
 * @update_kcqe: kcqe pointer
 *
 * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
 */
static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
					   struct iscsi_kcqe *update_kcqe)
{
	struct bnx2i_conn *conn;
	u32 iscsi_cid;

	iscsi_cid = update_kcqe->iscsi_conn_id;
	conn = bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!conn) {
		printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
		return;
	}
	if (!conn->ep) {
		printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
		return;
	}

	if (update_kcqe->completion_status) {
		printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
		conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
	} else
		conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;

	wake_up_interruptible(&conn->ep->ofld_wait);
}

/**
 * bnx2i_recovery_que_add_conn - add connection to recovery queue
 * @hba: adapter structure pointer
 * @bnx2i_conn: iscsi connection
 *
 * Add connection to recovery queue and schedule adapter eh worker
 */
static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn)
{
	iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
			   ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_process_tcp_error - process error notification on a given connection
 * @hba: adapter structure pointer
 * @tcp_err: tcp error kcqe pointer
 *
 * handles tcp level error notifications from FW.
 */
static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
				    struct iscsi_kcqe *tcp_err)
{
	struct bnx2i_conn *bnx2i_conn;
	u32 iscsi_cid;

	iscsi_cid = tcp_err->iscsi_conn_id;
	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);

	if (!bnx2i_conn) {
		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
		return;
	}

	printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
	       iscsi_cid, tcp_err->completion_status);
	bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
}
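
/* Unlike the iSCSI error handler below, TCP-level error indications are
 * always treated as fatal for the connection: the KCQE is logged and the
 * connection is handed straight to the recovery queue.
 */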

/**
 * bnx2i_process_iscsi_error - process error notification on a given connection
 * @hba: adapter structure pointer
 * @iscsi_err: iscsi error kcqe pointer
 *
 * handles iscsi error notifications from the FW. Based on the initial
 * handshake, firmware classifies an iscsi protocol / TCP RFC violation as
 * either a warning or an error indication. For an "Error" indication the
 * driver initiates session recovery for that connection/session. For a
 * "Warning" indication the driver puts out a system log message (only one
 * message per error type for the life of the session, to avoid
 * unnecessarily overloading the system)
 */
static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
				      struct iscsi_kcqe *iscsi_err)
{
	struct bnx2i_conn *bnx2i_conn;
	u32 iscsi_cid;
	char warn_notice[] = "iscsi_warning";
	char error_notice[] = "iscsi_error";
	char additional_notice[64];
	char *message;
	int need_recovery;
	u64 err_mask64;

	iscsi_cid = iscsi_err->iscsi_conn_id;
	bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
	if (!bnx2i_conn) {
		printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
		return;
	}

	err_mask64 = (0x1ULL << iscsi_err->completion_status);

	if (err_mask64 & iscsi_error_mask) {
		need_recovery = 0;
		message = warn_notice;
	} else {
		need_recovery = 1;
		message = error_notice;
	}

	switch (iscsi_err->completion_status) {
	case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
		strcpy(additional_notice, "hdr digest err");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
		strcpy(additional_notice, "data digest err");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
		strcpy(additional_notice, "wrong opcode rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
		strcpy(additional_notice, "AHS len > 0 rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
		strcpy(additional_notice, "invalid ITT rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
		strcpy(additional_notice, "wrong StatSN rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
		strcpy(additional_notice, "wrong DataSN rcvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
		strcpy(additional_notice, "pend R2T violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
		strcpy(additional_notice, "ERL0, UO");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
		strcpy(additional_notice, "ERL0, U1");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
		strcpy(additional_notice, "ERL0, U2");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
		strcpy(additional_notice, "ERL0, U3");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
		strcpy(additional_notice, "ERL0, U4");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
		strcpy(additional_notice, "ERL0, U5");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
		strcpy(additional_notice, "ERL0, U6");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
		strcpy(additional_notice, "invalid resi len");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
		strcpy(additional_notice, "MRDSL violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
		strcpy(additional_notice, "F-bit not set");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
		strcpy(additional_notice, "invalid TTT");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
		strcpy(additional_notice, "invalid DataSN");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
		strcpy(additional_notice, "burst len violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
		strcpy(additional_notice, "buf offset violation");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
		strcpy(additional_notice, "invalid LUN field");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
		strcpy(additional_notice, "invalid R2TSN field");
		break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
		strcpy(additional_notice, "invalid cmd len1");
		break;
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
	case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
		strcpy(additional_notice, "invalid cmd len2");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
		strcpy(additional_notice,
		       "pend r2t exceeds MaxOutstandingR2T value");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
		strcpy(additional_notice, "TTT is rsvd");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
		strcpy(additional_notice, "MBL violation");
		break;
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO 	\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
	case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
		strcpy(additional_notice, "data seg len != 0");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
		strcpy(additional_notice, "reject pdu len error");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
		strcpy(additional_notice, "async pdu len error");
		break;
	case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
		strcpy(additional_notice, "nopin pdu len error");
		break;
#define BNX2_ERR_PEND_R2T_IN_CLEANUP			\
	ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
	case BNX2_ERR_PEND_R2T_IN_CLEANUP:
		strcpy(additional_notice, "pend r2t in cleanup");
		break;

	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
		strcpy(additional_notice, "IP fragments rcvd");
		break;
	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
		strcpy(additional_notice, "IP options error");
		break;
	case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
		strcpy(additional_notice, "urgent flag error");
		break;
	default:
		printk(KERN_ALERT "iscsi_err - unknown err %x\n",
		       iscsi_err->completion_status);
	}

	if (need_recovery) {
		iscsi_conn_printk(KERN_ALERT,
				  bnx2i_conn->cls_conn->dd_data,
				  "bnx2i: %s - %s\n",
				  message, additional_notice);

		iscsi_conn_printk(KERN_ALERT,
				  bnx2i_conn->cls_conn->dd_data,
				  "conn_err - hostno %d conn %p, "
				  "iscsi_cid %x cid %x\n",
				  bnx2i_conn->hba->shost->host_no,
				  bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
				  bnx2i_conn->ep->ep_cid);
		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
	} else
		if (!test_and_set_bit(iscsi_err->completion_status,
				      (void *) &bnx2i_conn->violation_notified))
			iscsi_conn_printk(KERN_ALERT,
					  bnx2i_conn->cls_conn->dd_data,
					  "bnx2i: %s - %s\n",
					  message, additional_notice);
}
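
/* Classification sketch: completion_status is used as a bit index into a
 * 64-bit mask, err_mask64 = 1ULL << completion_status.  Statuses whose bit
 * is set in the module-level iscsi_error_mask are reported as warnings and
 * logged at most once per status per connection (tracked through the
 * violation_notified bitmap); all other statuses trigger session recovery.
 */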

/**
 * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
 * @hba: adapter structure pointer
 * @conn_destroy: conn destroy kcqe pointer
 *
 * handles connection destroy completion request.
 */
static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
					    struct iscsi_kcqe *conn_destroy)
{
	struct bnx2i_endpoint *ep;

	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
	if (!ep) {
		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
				  "offload request, unexpected completion\n");
		return;
	}

	if (hba != ep->hba) {
		printk(KERN_ALERT "conn destroy- error hba mis-match\n");
		return;
	}

	if (conn_destroy->completion_status) {
		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
		ep->state = EP_STATE_CLEANUP_FAILED;
	} else
		ep->state = EP_STATE_CLEANUP_CMPL;
	wake_up_interruptible(&ep->ofld_wait);
}

/**
 * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
 * @hba: adapter structure pointer
 * @ofld_kcqe: conn offload kcqe pointer
 *
 * handles initial connection offload completion, ep_connect() thread is
 * woken-up to continue with LLP connect process
 */
static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
				    struct iscsi_kcqe *ofld_kcqe)
{
	u32 cid_addr;
	struct bnx2i_endpoint *ep;
	u32 cid_num;

	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
	if (!ep) {
		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
		return;
	}

	if (hba != ep->hba) {
		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
		return;
	}

	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
			printk(KERN_ALERT "bnx2i: unable to allocate"
					  " iSCSI context resources\n");
		ep->state = EP_STATE_OFLD_FAILED;
	} else {
		ep->state = EP_STATE_OFLD_COMPL;
		cid_addr = ofld_kcqe->iscsi_conn_context_id;
		cid_num = bnx2i_get_cid_num(ep);
		ep->ep_cid = cid_addr;
		ep->qp.ctx_base = NULL;
	}
	wake_up_interruptible(&ep->ofld_wait);
}
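
/* Once EP_STATE_OFLD_COMPL is set, the waiting ep_connect() thread resumes.
 * ep->ep_cid now carries the firmware supplied context id and qp.ctx_base is
 * left NULL on purpose; the per-connection doorbell window is mapped later
 * by bnx2i_map_ep_dbell_regs().
 */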

/**
 * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher
 * @context: adapter structure pointer
 * @kcqe: array of newly DMA'ed KCQE entries
 * @num_cqe: number of entries in @kcqe
 *
 * Generic KCQ event handler/dispatcher
 */
static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
				u32 num_cqe)
{
	struct bnx2i_hba *hba = context;
	int i = 0;
	struct iscsi_kcqe *ikcqe = NULL;

	while (i < num_cqe) {
		ikcqe = (struct iscsi_kcqe *) kcqe[i++];

		if (ikcqe->op_code ==
		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
			bnx2i_fastpath_notification(hba, ikcqe);
		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
			bnx2i_process_ofld_cmpl(hba, ikcqe);
		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
			bnx2i_process_update_conn_cmpl(hba, ikcqe);
		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
			if (ikcqe->completion_status !=
			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
				bnx2i_iscsi_license_error(hba, ikcqe->\
							  completion_status);
			else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2i_get_link_state(hba);
				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
						 "ISCSI_INIT passed\n",
						 (u8)hba->pcidev->bus->number,
						 hba->pci_devno, hba->pci_func);
			}
		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
			bnx2i_process_iscsi_error(hba, ikcqe);
		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
			bnx2i_process_tcp_error(hba, ikcqe);
		else
			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
			       ikcqe->op_code);
	}
}

/**
 * bnx2i_indicate_netevent - Generic netdev event handler
 * @context: adapter structure pointer
 * @event: event type
 *
 * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
 * NETDEV_GOING_DOWN and NETDEV_CHANGE
 */
static void bnx2i_indicate_netevent(void *context, unsigned long event)
{
	struct bnx2i_hba *hba = context;

	switch (event) {
	case NETDEV_UP:
		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
			bnx2i_send_fw_iscsi_init_msg(hba);
		break;
	case NETDEV_DOWN:
		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
		break;
	case NETDEV_GOING_DOWN:
		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
		iscsi_host_for_each_session(hba->shost,
					    bnx2i_drop_session);
		break;
	case NETDEV_CHANGE:
		bnx2i_get_link_state(hba);
		break;
	default:
		;
	}
}

/**
 * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate completion of option-2 TCP connect request.
 */
static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
		ep->state = EP_STATE_CONNECT_FAILED;
	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
		ep->state = EP_STATE_CONNECT_COMPL;
	else
		ep->state = EP_STATE_CONNECT_FAILED;

	wake_up_interruptible(&ep->ofld_wait);
}

/**
 * bnx2i_cm_close_cmpl - process tcp conn close completion
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate completion of option-2 graceful TCP connection shutdown
 */
static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_DISCONN_COMPL;
	wake_up_interruptible(&ep->ofld_wait);
}

/**
 * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate completion of option-2 abortive TCP connection termination
 */
static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_DISCONN_COMPL;
	wake_up_interruptible(&ep->ofld_wait);
}

/**
 * bnx2i_cm_remote_close - process received TCP FIN
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to indicate
 * async TCP events such as FIN
 */
static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_TCP_FIN_RCVD;
	if (ep->conn)
		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
}

/**
 * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
 * @cm_sk: cnic sock structure pointer
 *
 * function callback exported via bnx2i - cnic driver interface to
 * indicate async TCP events (RST) sent by the peer.
 */
static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
{
	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;

	ep->state = EP_STATE_TCP_RST_RCVD;
	if (ep->conn)
		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
}

static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
			       char *buf, u16 buflen)
{
	struct bnx2i_hba *hba;

	hba = bnx2i_find_hba_for_cnic(dev);
	if (!hba)
		return;

	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
			       msg_type, buf, buflen))
		printk(KERN_ALERT "bnx2i: private nl message send error\n");
}

/**
 * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
 *			carrying callback function pointers
 */
struct cnic_ulp_ops bnx2i_cnic_cb = {
	.cnic_init = bnx2i_ulp_init,
	.cnic_exit = bnx2i_ulp_exit,
	.cnic_start = bnx2i_start,
	.cnic_stop = bnx2i_stop,
	.indicate_kcqes = bnx2i_indicate_kcqe,
	.indicate_netevent = bnx2i_indicate_netevent,
	.cm_connect_complete = bnx2i_cm_connect_cmpl,
	.cm_close_complete = bnx2i_cm_close_cmpl,
	.cm_abort_complete = bnx2i_cm_abort_cmpl,
	.cm_remote_close = bnx2i_cm_remote_close,
	.cm_remote_abort = bnx2i_cm_remote_abort,
	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
	.owner = THIS_MODULE,
};
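
/* This template is registered with the cnic driver at module load time
 * (via cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb) in
 * bnx2i_init.c), after which cnic invokes the callbacks above for KCQ
 * events, netdev events and option-2 TCP connection state changes.
 */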

/**
 * bnx2i_map_ep_dbell_regs - map connection doorbell registers
 * @ep: bnx2i endpoint
 *
 * maps connection's SQ and RQ doorbell registers, 5706/5708/5709 host these
 * registers in BAR #0, whereas on 57710 these registers are accessed by
 * mapping BAR #1
 */
int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
{
	u32 cid_num;
	u32 reg_off;
	u32 first_l4l5;
	u32 ctx_sz;
	u32 config2;
	resource_size_t reg_base;

	cid_num = bnx2i_get_cid_num(ep);

	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
		reg_base = pci_resource_start(ep->hba->pcidev,
					      BNX2X_DOORBELL_PCI_BAR);
		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
		goto arm_cq;
	}

	reg_base = ep->hba->netdev->base_addr;
	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
		if (ctx_sz)
			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
				  + BNX2I_570X_PAGE_SIZE_DEFAULT *
				  (((cid_num - first_l4l5) / ctx_sz) + 256);
		else
			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
	} else
		/* 5709 device in normal mode and 5706/5708 devices */
		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);

	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
					  MB_KERNEL_CTX_SIZE);
	if (!ep->qp.ctx_base)
		return -ENOMEM;

arm_cq:
	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
	return 0;
}
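
/* Offset math sketch for the 57710 path above: each offloaded connection
 * owns one page-sized slot in the doorbell BAR, so the slot base is
 * PAGE_SIZE * (cid_num & 0x1FFFF) and DPM_TRIGER_TYPE selects the trigger
 * register inside that slot; only 4 bytes are actually ioremap'ed.  For
 * example, assuming 4 KiB pages, cid 5 maps at 5 * 4096 + DPM_TRIGER_TYPE
 * from the start of BNX2X_DOORBELL_PCI_BAR.  (The page size here is purely
 * illustrative; the code uses the kernel's PAGE_SIZE.)
 */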