// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>
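
/**
 * qla4xxx_space_in_req_ring - checks for adequate free space in request ring
 * @ha: Pointer to host adapter structure.
 * @req_cnt: Number of request entries needed.
 *
 * Refreshes the cached free-entry count from the firmware's shadow
 * request-queue out pointer when the cache looks too small, then reports
 * whether @req_cnt entries (plus two entries of slack) are available.
 * Returns 1 if there is room, 0 otherwise.
 **/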
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}
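
/**
 * qla4xxx_advance_req_ring_ptr - advances the request-in index and pointer
 * @ha: Pointer to host adapter structure.
 *
 * Moves request_in and request_ptr to the next ring slot, wrapping both
 * back to the start of the ring at REQUEST_QUEUE_DEPTH.
 **/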
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;
	int rval = QLA_ERROR;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		rval = QLA_SUCCESS;
	}

	return rval;
}

/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, uint64_t lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
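
/**
 * qla4xxx_alloc_cont_entry - claims the next ring slot as a continuation entry
 * @ha: Pointer to host adapter structure.
 *
 * Takes the entry at the current request pointer, advances the ring, and
 * initializes the entry header as a Continuation Type 1 IOCB.
 **/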
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}
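
/**
 * qla4xxx_calc_request_entries - counts IOCBs needed for a scatter list
 * @dsds: Number of data segment descriptors.
 *
 * One command IOCB holds up to COMMAND_SEG descriptors; each additional
 * continuation IOCB holds up to CONTINUE_SEG more. Returns the total
 * number of request-queue entries required.
 **/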
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}
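
/**
 * qla4xxx_build_scsi_iocbs - fills in the data segment descriptors
 * @srb: Pointer to SCSI Request Block.
 * @cmd_entry: Pointer to the command entry being built.
 * @tot_dsds: Total number of data segment descriptors.
 *
 * Walks the command's scatter-gather list, writing each segment's DMA
 * address and length into the command entry and, once the command entry
 * is full, into freshly allocated continuation entries.
 **/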
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
					&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}
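
/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * Writes the request-in index to the ISP83xx request-queue doorbell;
 * the readback flushes the posted PCI write.
 **/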
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}
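
/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * Writes the response-out index to the ISP83xx response-queue doorbell;
 * the readback flushes the posted PCI write.
 **/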
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}

/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	uint32_t dbval = 0;

	dbval = 0x14 | (ha->func_num << 5);
	dbval = dbval | (0 << 8) | (ha->request_in << 16);

	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}

/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 * This also clears the interrupt.
 **/
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}

/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = scsi_cmd_to_rq(cmd)->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}
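
/**
 * qla4xxx_send_passthru0 - issues a PASSTHRU0 IOCB for an iSCSI PDU
 * @ha: iSCSI task to be sent to the firmware
 *
 * Builds a passthrough IOCB that hands the task's PDU (and optional
 * response buffer) to the firmware, then rings the request doorbell.
 **/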
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}
	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	wmb();

	/* Track IOCB used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}
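
/**
 * qla4xxx_get_new_mrb - allocates a mailbox request block
 * @ha: Pointer to host adapter structure.
 *
 * Returns a zeroed mrb tied to @ha, or NULL on allocation failure.
 **/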
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *mrb;

	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
	if (!mrb)
		return mrb;

	mrb->ha = ha;
	return mrb;
}
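
/**
 * qla4xxx_send_mbox_iocb - queues a mailbox command as an IOCB
 * @ha: Pointer to host adapter structure.
 * @mrb: Pointer to the mailbox request block tracking the command.
 * @in_mbox: Mailbox register values to place in the IOCB.
 *
 * Reserves a request-queue entry, finds a free slot in the active mrb
 * array for completion tracking, copies the mailbox registers into the
 * entry, and notifies the firmware.
 **/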
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index*/
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->iocb_cnt += mrb->iocb_cnt;
	ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
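
/**
 * qla4xxx_ping_iocb - issues a PING mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: Ping options passed through to the firmware.
 * @payload_size: Size of the ping payload.
 * @pid: Ping identifier recorded in the mrb to match the completion.
 * @ipaddr: Destination IP address (IPv4 or IPv6).
 *
 * Builds a MBOX_CMD_PING request in a newly allocated mrb and sends it
 * to the firmware via the mailbox IOCB path.
 **/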
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);

	if (rval != QLA_SUCCESS)
		goto exit_ping;

	return rval;

exit_ping:
	kfree(mrb);
	return rval;
}