2 * Aic94xx SAS/SATA Tasks
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 * This file is licensed under GPLv2.
9 * This file is part of the aic94xx driver.
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 #include <linux/spinlock.h>
29 #include "aic94xx_sas.h"
30 #include "aic94xx_hwi.h"
/*
 * Forward declarations for the per-protocol teardown helpers defined
 * later in this file; asd_task_tasklet_complete() dispatches to them.
 */
32 static void asd_unbuild_ata_ascb(struct asd_ascb
*a
);
33 static void asd_unbuild_smp_ascb(struct asd_ascb
*a
);
34 static void asd_unbuild_ssp_ascb(struct asd_ascb
*a
);
/*
 * asd_can_dequeue -- return @num task slots to the sequencer's can_queue
 * budget, under seq.pend_q_lock.  Counterpart of asd_can_queue() below,
 * which subtracts from the same counter.
 * NOTE(review): this chunk appears truncated -- the function braces and
 * the `flags` declaration used by spin_lock_irqsave() are not visible
 * here; confirm against the full file.
 */
36 static inline void asd_can_dequeue(struct asd_ha_struct
*asd_ha
, int num
)
40 spin_lock_irqsave(&asd_ha
->seq
.pend_q_lock
, flags
);
41 asd_ha
->seq
.can_queue
+= num
;
42 spin_unlock_irqrestore(&asd_ha
->seq
.pend_q_lock
, flags
);
/*
 * Translation table from PCI DMA direction constants to the hardware's
 * DATA_DIR_* SCB flag values, indexed by task->data_dir.  Used by both
 * asd_build_ata_ascb() and asd_build_ssp_ascb().
 */
45 /* PCI_DMA_... to our direction translation.
47 static const u8 data_dir_flags
[] = {
48 [PCI_DMA_BIDIRECTIONAL
] = DATA_DIR_BYRECIPIENT
, /* UNSPECIFIED */
49 [PCI_DMA_TODEVICE
] = DATA_DIR_OUT
, /* OUTBOUND */
50 [PCI_DMA_FROMDEVICE
] = DATA_DIR_IN
, /* INBOUND */
51 [PCI_DMA_NONE
] = DATA_DIR_NONE
, /* NO TRANSFER */
/*
 * asd_map_scatterlist -- DMA-map a task's data buffer(s) and fill in the
 * SCB's embedded sg_arr elements.
 *
 * Visible cases:
 *  - PCI_DMA_NONE: nothing to map.
 *  - num_scatter == 0: single flat buffer; pci_map_single() it and write
 *    one EOL element.
 *  - otherwise: pci_map_sg() the scatterlist.  For long lists an external
 *    coherent sg table is allocated (asd_alloc_coherent) and the SCB's
 *    third embedded element chains to it; short lists fit entirely in the
 *    embedded elements.
 *
 * NOTE(review): chunk appears truncated -- return statements, the sg_arr/
 * num_sg/sg declarations, branch conditions and braces are not all
 * visible; comments below describe only what is shown.
 */
54 static inline int asd_map_scatterlist(struct sas_task
*task
,
58 struct asd_ascb
*ascb
= task
->lldd_task
;
59 struct asd_ha_struct
*asd_ha
= ascb
->ha
;
60 struct scatterlist
*sc
;
/* No data phase: nothing to map. */
63 if (task
->data_dir
== PCI_DMA_NONE
)
/* Flat (non-scatterlist) buffer: map it as a single DMA region. */
66 if (task
->num_scatter
== 0) {
67 void *p
= task
->scatter
;
68 dma_addr_t dma
= pci_map_single(asd_ha
->pcidev
, p
,
71 sg_arr
[0].bus_addr
= cpu_to_le64((u64
)dma
);
72 sg_arr
[0].size
= cpu_to_le32(task
->total_xfer_len
);
73 sg_arr
[0].flags
|= ASD_SG_EL_LIST_EOL
;
/* Scatterlist path: map all entries for DMA. */
77 num_sg
= pci_map_sg(asd_ha
->pcidev
, task
->scatter
, task
->num_scatter
,
/* Long list: allocate an external coherent sg table for all entries. */
85 ascb
->sg_arr
= asd_alloc_coherent(asd_ha
,
86 num_sg
*sizeof(struct sg_el
),
/* Fill the external table; last element gets the EOL flag below. */
92 for (sc
= task
->scatter
, i
= 0; i
< num_sg
; i
++, sc
++) {
94 &((struct sg_el
*)ascb
->sg_arr
->vaddr
)[i
];
95 sg
->bus_addr
= cpu_to_le64((u64
)sg_dma_address(sc
));
96 sg
->size
= cpu_to_le32((u32
)sg_dma_len(sc
));
98 sg
->flags
|= ASD_SG_EL_LIST_EOL
;
/*
 * Embedded elements 0..1 hold the first two entries; element 1 is
 * marked end-of-segment and element 2 chains to the external table.
 */
101 for (sc
= task
->scatter
, i
= 0; i
< 2; i
++, sc
++) {
103 cpu_to_le64((u64
)sg_dma_address(sc
));
104 sg_arr
[i
].size
= cpu_to_le32((u32
)sg_dma_len(sc
));
106 sg_arr
[1].next_sg_offs
= 2 * sizeof(*sg_arr
);
107 sg_arr
[1].flags
|= ASD_SG_EL_LIST_EOS
;
109 memset(&sg_arr
[2], 0, sizeof(*sg_arr
));
110 sg_arr
[2].bus_addr
=cpu_to_le64((u64
)ascb
->sg_arr
->dma_handle
);
/* Short list: all entries fit in the SCB's embedded elements. */
113 for (sc
= task
->scatter
, i
= 0; i
< num_sg
; i
++, sc
++) {
115 cpu_to_le64((u64
)sg_dma_address(sc
));
116 sg_arr
[i
].size
= cpu_to_le32((u32
)sg_dma_len(sc
));
118 sg_arr
[i
-1].flags
|= ASD_SG_EL_LIST_EOL
;
/* Error path (presumably): undo the pci_map_sg() done above. */
123 pci_unmap_sg(asd_ha
->pcidev
, task
->scatter
, task
->num_scatter
,
/*
 * asd_unmap_scatterlist -- inverse of asd_map_scatterlist(): unmap the
 * task's DMA mappings and free the external sg table if one was
 * allocated.
 * NOTE(review): chunk appears truncated -- braces, early return and the
 * condition guarding asd_free_coherent() are not visible here.
 */
128 static inline void asd_unmap_scatterlist(struct asd_ascb
*ascb
)
130 struct asd_ha_struct
*asd_ha
= ascb
->ha
;
131 struct sas_task
*task
= ascb
->uldd_task
;
133 if (task
->data_dir
== PCI_DMA_NONE
)
/*
 * Flat-buffer case: recover the bus address stored in the SCB's first
 * sg element by asd_map_scatterlist() and unmap the single region.
 */
136 if (task
->num_scatter
== 0) {
137 dma_addr_t dma
= (dma_addr_t
)
138 le64_to_cpu(ascb
->scb
->ssp_task
.sg_element
[0].bus_addr
);
139 pci_unmap_single(ascb
->ha
->pcidev
, dma
, task
->total_xfer_len
,
/* Scatterlist case: free the external table, then unmap the list. */
144 asd_free_coherent(asd_ha
, ascb
->sg_arr
);
145 pci_unmap_sg(asd_ha
->pcidev
, task
->scatter
, task
->num_scatter
,
149 /* ---------- Task complete tasklet ---------- */
/*
 * asd_get_response_tasklet -- decode a protocol response delivered via an
 * empty data buffer (EDB).  The done-list status block identifies the
 * ESCB and EDB index; the EDB contents are parsed per protocol (SSP
 * response IU with optional sense data, or ATA register FIS) into the
 * task's task_status.  Finally the consumed EDB is invalidated.
 * NOTE(review): chunk appears truncated -- the tc_resp_sb_struct field
 * list, the `r` buffer assignment, braces and the escb NULL-check branch
 * are not visible here.
 */
151 static void asd_get_response_tasklet(struct asd_ascb
*ascb
,
152 struct done_list_struct
*dl
)
154 struct asd_ha_struct
*asd_ha
= ascb
->ha
;
155 struct sas_task
*task
= ascb
->uldd_task
;
156 struct task_status_struct
*ts
= &task
->task_status
;
/* Overlay describing the firmware's done-list status block layout. */
158 struct tc_resp_sb_struct
{
162 } __attribute__ ((packed
)) *resp_sb
= (void *) dl
->status_block
;
164 /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
/* EDB index is encoded in bits 6:4 of flags, 1-based in hardware. */
165 int edb_id
= ((resp_sb
->flags
& 0x70) >> 4)-1;
166 struct asd_ascb
*escb
;
167 struct asd_dma_tok
*edb
;
/* Look up the ESCB that owns the EDB, under tc_index_lock. */
170 spin_lock_irqsave(&asd_ha
->seq
.tc_index_lock
, flags
);
171 escb
= asd_tc_index_find(&asd_ha
->seq
,
172 (int)le16_to_cpu(resp_sb
->index_escb
));
173 spin_unlock_irqrestore(&asd_ha
->seq
.tc_index_lock
, flags
);
176 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
180 ts
->buf_valid_size
= 0;
181 edb
= asd_ha
->seq
.edb_arr
[edb_id
+ escb
->edb_index
];
/* SSP: parse the response IU that follows the frame header. */
183 if (task
->task_proto
== SAS_PROTO_SSP
) {
184 struct ssp_response_iu
*iu
=
185 r
+ 16 + sizeof(struct ssp_frame_hdr
);
/* First dword of the EDB holds the residual count. */
187 ts
->residual
= le32_to_cpu(*(__le32
*)r
);
188 ts
->resp
= SAS_TASK_COMPLETE
;
/* datapres: 0 = status only, 1 = response data, 2 = sense data. */
189 if (iu
->datapres
== 0)
190 ts
->stat
= iu
->status
;
191 else if (iu
->datapres
== 1)
192 ts
->stat
= iu
->resp_data
[3];
193 else if (iu
->datapres
== 2) {
194 ts
->stat
= SAM_CHECK_COND
;
/* Copy sense data, clamped to the libsas status buffer size. */
195 ts
->buf_valid_size
= min((u32
) SAS_STATUS_BUF_SIZE
,
196 be32_to_cpu(iu
->sense_data_len
));
197 memcpy(ts
->buf
, iu
->sense_data
, ts
->buf_valid_size
);
198 if (iu
->status
!= SAM_CHECK_COND
) {
199 ASD_DPRINTK("device %llx sent sense data, but "
200 "stat(0x%x) is not CHECK_CONDITION"
202 SAS_ADDR(task
->dev
->sas_addr
),
/* ATA/ATAPI: copy the ending register FIS into ts->buf. */
207 struct ata_task_resp
*resp
= (void *) &ts
->buf
[0];
209 ts
->residual
= le32_to_cpu(*(__le32
*)r
);
211 if (SAS_STATUS_BUF_SIZE
>= sizeof(*resp
)) {
212 resp
->frame_len
= le16_to_cpu(*(__le16
*)(r
+6));
213 memcpy(&resp
->ending_fis
[0], r
+16, 24);
214 ts
->buf_valid_size
= sizeof(*resp
);
/* Hand the EDB back to the sequencer for reuse. */
218 asd_invalidate_edb(escb
, edb_id
);
/*
 * asd_task_tasklet_complete -- completion handler for a task SCB.
 * Translates the done-list opcode into libsas response/status codes in
 * task->task_status, tears down the per-protocol resources, updates the
 * task state flags, and either signals an aborting thread or calls
 * task->task_done().
 * NOTE(review): chunk appears truncated -- most switch/case labels,
 * braces, breaks and the `flags` declaration are missing from this view;
 * the ts->resp/ts->stat pairs below belong to case arms whose labels are
 * largely not visible.
 */
221 static void asd_task_tasklet_complete(struct asd_ascb
*ascb
,
222 struct done_list_struct
*dl
)
224 struct sas_task
*task
= ascb
->uldd_task
;
225 struct task_status_struct
*ts
= &task
->task_status
;
227 u8 opcode
= dl
->opcode
;
/* SCB is done: give its queue slot back to the sequencer budget. */
229 asd_can_dequeue(ascb
->ha
, 1);
234 ts
->resp
= SAS_TASK_COMPLETE
;
/* Underrun: residual is in the first dword of the status block. */
238 ts
->resp
= SAS_TASK_COMPLETE
;
239 ts
->stat
= SAS_DATA_UNDERRUN
;
240 ts
->residual
= le32_to_cpu(*(__le32
*)dl
->status_block
);
243 ts
->resp
= SAS_TASK_COMPLETE
;
244 ts
->stat
= SAS_DATA_OVERRUN
;
/* Protocol response frame received: decode it from the EDB. */
249 ts
->resp
= SAS_TASK_COMPLETE
;
250 ts
->stat
= SAS_PROTO_RESPONSE
;
251 asd_get_response_tasklet(ascb
, dl
);
/* OPEN REJECT: decode abandon/retry reason from the status block. */
254 ts
->resp
= SAS_TASK_UNDELIVERED
;
255 ts
->stat
= SAS_OPEN_REJECT
;
256 if (dl
->status_block
[1] & 2)
257 ts
->open_rej_reason
= 1 + dl
->status_block
[2];
258 else if (dl
->status_block
[1] & 1)
259 ts
->open_rej_reason
= (dl
->status_block
[2] >> 4)+10;
261 ts
->open_rej_reason
= SAS_OREJ_UNKNOWN
;
264 ts
->resp
= SAS_TASK_UNDELIVERED
;
265 ts
->stat
= SAS_OPEN_TO
;
269 ts
->resp
= SAS_TASK_UNDELIVERED
;
270 ts
->stat
= SAS_PHY_DOWN
;
273 ts
->resp
= SAS_TASK_COMPLETE
;
274 ts
->stat
= SAS_PHY_DOWN
;
280 case TF_SMP_XMIT_RCV_ERR
:
281 case TC_ATA_R_ERR_RECV
:
282 ts
->resp
= SAS_TASK_COMPLETE
;
283 ts
->stat
= SAS_INTERRUPTED
;
289 ts
->resp
= SAS_TASK_UNDELIVERED
;
290 ts
->stat
= SAS_DEV_NO_RESPONSE
;
293 ts
->resp
= SAS_TASK_COMPLETE
;
294 ts
->stat
= SAS_NAK_R_ERR
;
/* Nexus loss: the real opcode is carried in the status block. */
296 case TA_I_T_NEXUS_LOSS
:
297 opcode
= dl
->status_block
[0];
300 case TF_INV_CONN_HANDLE
:
301 ts
->resp
= SAS_TASK_UNDELIVERED
;
302 ts
->stat
= SAS_DEVICE_UNKNOWN
;
304 case TF_REQUESTED_N_PENDING
:
305 ts
->resp
= SAS_TASK_UNDELIVERED
;
306 ts
->stat
= SAS_PENDING
;
308 case TC_TASK_CLEARED
:
310 ts
->resp
= SAS_TASK_COMPLETE
;
311 ts
->stat
= SAS_ABORTED_TASK
;
317 case TF_TMF_TAG_FREE
:
318 case TF_TMF_TASK_DONE
:
319 case TF_TMF_NO_CONN_HANDLE
:
322 case TF_DATA_OFFS_ERR
:
323 ts
->resp
= SAS_TASK_UNDELIVERED
;
324 ts
->stat
= SAS_DEV_NO_RESPONSE
;
327 case TC_LINK_ADM_RESP
:
330 case TC_PARTIAL_SG_LIST
:
332 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__
, opcode
);
/* Tear down per-protocol DMA mappings / coherent buffers. */
336 switch (task
->task_proto
) {
339 asd_unbuild_ata_ascb(ascb
);
342 asd_unbuild_smp_ascb(ascb
);
345 asd_unbuild_ssp_ascb(ascb
);
/* Mark the task done; an abort in flight gets the completion instead. */
350 spin_lock_irqsave(&task
->task_state_lock
, flags
);
351 task
->task_state_flags
&= ~SAS_TASK_STATE_PENDING
;
352 task
->task_state_flags
&= ~SAS_TASK_AT_INITIATOR
;
353 task
->task_state_flags
|= SAS_TASK_STATE_DONE
;
354 if (unlikely((task
->task_state_flags
& SAS_TASK_STATE_ABORTED
))) {
355 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
356 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
357 "stat 0x%x but aborted by upper layer!\n",
358 task
, opcode
, ts
->resp
, ts
->stat
);
359 complete(&ascb
->completion
);
361 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
362 task
->lldd_task
= NULL
;
365 task
->task_done(task
);
369 /* ---------- ATA ---------- */
/*
 * asd_build_ata_ascb -- fill an ascb's SCB for an ATA/ATAPI task:
 * choose the SCB opcode, copy and patch the host-to-device FIS, set
 * protocol/connection-rate and ATA flag bytes, and map the data buffer.
 * NOTE(review): chunk appears truncated -- the gfp_flags parameter, the
 * scb/res/flags declarations, braces and the return are not visible
 * here.
 */
371 static int asd_build_ata_ascb(struct asd_ascb
*ascb
, struct sas_task
*task
,
374 struct domain_device
*dev
= task
->dev
;
/* Opcode: device-control register write, plain ATA, or ATAPI. */
381 if (unlikely(task
->ata_task
.device_control_reg_update
))
382 scb
->header
.opcode
= CONTROL_ATA_DEV
;
383 else if (dev
->sata_dev
.command_set
== ATA_COMMAND_SET
)
384 scb
->header
.opcode
= INITIATE_ATA_TASK
;
386 scb
->header
.opcode
= INITIATE_ATAPI_TASK
;
388 scb
->ata_task
.proto_conn_rate
= (1 << 5); /* STP */
/* Only set a link rate when the port came up via SAS OOB. */
389 if (dev
->port
->oob_mode
== SAS_OOB_MODE
)
390 scb
->ata_task
.proto_conn_rate
|= dev
->linkrate
;
392 scb
->ata_task
.total_xfer_len
= cpu_to_le32(task
->total_xfer_len
);
/* Copy the caller's FIS, then force the register H2D type byte. */
393 scb
->ata_task
.fis
= task
->ata_task
.fis
;
394 scb
->ata_task
.fis
.fis_type
= 0x27;
395 if (likely(!task
->ata_task
.device_control_reg_update
))
396 scb
->ata_task
.fis
.flags
|= 0x80; /* C=1: update ATA cmd reg */
397 scb
->ata_task
.fis
.flags
&= 0xF0; /* PM_PORT field shall be 0 */
398 if (dev
->sata_dev
.command_set
== ATAPI_COMMAND_SET
)
399 memcpy(scb
->ata_task
.atapi_packet
, task
->ata_task
.atapi_packet
,
401 scb
->ata_task
.sister_scb
= cpu_to_le16(0xFFFF);
402 scb
->ata_task
.conn_handle
= cpu_to_le16(
403 (u16
)(unsigned long)dev
->lldd_dev
);
/* Build ata_flags (DMA / NCQ / direction) for normal commands only. */
405 if (likely(!task
->ata_task
.device_control_reg_update
)) {
407 if (task
->ata_task
.dma_xfer
)
408 flags
|= DATA_XFER_MODE_DMA
;
409 if (task
->ata_task
.use_ncq
&&
410 dev
->sata_dev
.command_set
!= ATAPI_COMMAND_SET
)
411 flags
|= ATA_Q_TYPE_NCQ
;
412 flags
|= data_dir_flags
[task
->data_dir
];
413 scb
->ata_task
.ata_flags
= flags
;
415 scb
->ata_task
.retry_count
= task
->ata_task
.retry_count
;
/* Affiliation policy bits go into the separate flags byte. */
418 if (task
->ata_task
.set_affil_pol
)
419 flags
|= SET_AFFIL_POLICY
;
420 if (task
->ata_task
.stp_affil_pol
)
421 flags
|= STP_AFFIL_POLICY
;
422 scb
->ata_task
.flags
= flags
;
424 ascb
->tasklet_complete
= asd_task_tasklet_complete
;
/* Device-control commands carry no data; skip mapping for them. */
426 if (likely(!task
->ata_task
.device_control_reg_update
))
427 res
= asd_map_scatterlist(task
, scb
->ata_task
.sg_element
,
/* Teardown for an ATA ascb: just undo the scatterlist mapping. */
433 static void asd_unbuild_ata_ascb(struct asd_ascb
*a
)
435 asd_unmap_scatterlist(a
);
438 /* ---------- SMP ---------- */
/*
 * asd_build_smp_ascb -- fill an ascb's SCB for an SMP task: DMA-map the
 * request and response frames, then record their bus addresses and sizes
 * in the SCB.  The 4-byte CRC is excluded from each size (hence the -4).
 * NOTE(review): chunk appears truncated -- the gfp_flags parameter, scb
 * declaration, braces and the return are not visible here.
 */
440 static int asd_build_smp_ascb(struct asd_ascb
*ascb
, struct sas_task
*task
,
443 struct asd_ha_struct
*asd_ha
= ascb
->ha
;
444 struct domain_device
*dev
= task
->dev
;
/* Map the single-entry request and response scatterlists. */
447 pci_map_sg(asd_ha
->pcidev
, &task
->smp_task
.smp_req
, 1,
449 pci_map_sg(asd_ha
->pcidev
, &task
->smp_task
.smp_resp
, 1,
454 scb
->header
.opcode
= INITIATE_SMP_TASK
;
456 scb
->smp_task
.proto_conn_rate
= dev
->linkrate
;
458 scb
->smp_task
.smp_req
.bus_addr
=
459 cpu_to_le64((u64
)sg_dma_address(&task
->smp_task
.smp_req
));
460 scb
->smp_task
.smp_req
.size
=
461 cpu_to_le32((u32
)sg_dma_len(&task
->smp_task
.smp_req
)-4);
463 scb
->smp_task
.smp_resp
.bus_addr
=
464 cpu_to_le64((u64
)sg_dma_address(&task
->smp_task
.smp_resp
));
465 scb
->smp_task
.smp_resp
.size
=
466 cpu_to_le32((u32
)sg_dma_len(&task
->smp_task
.smp_resp
)-4);
468 scb
->smp_task
.sister_scb
= cpu_to_le16(0xFFFF);
469 scb
->smp_task
.conn_handle
= cpu_to_le16((u16
)
470 (unsigned long)dev
->lldd_dev
);
472 ascb
->tasklet_complete
= asd_task_tasklet_complete
;
/*
 * Teardown for an SMP ascb: unmap the request and response frames that
 * asd_build_smp_ascb() mapped.
 */
477 static void asd_unbuild_smp_ascb(struct asd_ascb
*a
)
479 struct sas_task
*task
= a
->uldd_task
;
482 pci_unmap_sg(a
->ha
->pcidev
, &task
->smp_task
.smp_req
, 1,
484 pci_unmap_sg(a
->ha
->pcidev
, &task
->smp_task
.smp_resp
, 1,
488 /* ---------- SSP ---------- */
/*
 * asd_build_ssp_ascb -- fill an ascb's SCB for an SSP (SAS SCSI) task:
 * SSP frame header with hashed source/destination addresses, the SSP
 * command IU (LUN, first-burst/priority/attribute byte, CDB), connection
 * handle, data direction, and the data scatterlist mapping.
 * NOTE(review): chunk appears truncated -- the gfp_flags parameter in
 * the signature, the scb declaration, braces and the return are not
 * visible here.
 */
490 static int asd_build_ssp_ascb(struct asd_ascb
*ascb
, struct sas_task
*task
,
493 struct domain_device
*dev
= task
->dev
;
499 scb
->header
.opcode
= INITIATE_SSP_TASK
;
501 scb
->ssp_task
.proto_conn_rate
= (1 << 4); /* SSP */
502 scb
->ssp_task
.proto_conn_rate
|= dev
->linkrate
;
503 scb
->ssp_task
.total_xfer_len
= cpu_to_le32(task
->total_xfer_len
);
504 scb
->ssp_task
.ssp_frame
.frame_type
= SSP_DATA
;
505 memcpy(scb
->ssp_task
.ssp_frame
.hashed_dest_addr
, dev
->hashed_sas_addr
,
506 HASHED_SAS_ADDR_SIZE
);
507 memcpy(scb
->ssp_task
.ssp_frame
.hashed_src_addr
,
508 dev
->port
->ha
->hashed_sas_addr
, HASHED_SAS_ADDR_SIZE
);
509 scb
->ssp_task
.ssp_frame
.tptt
= cpu_to_be16(0xFFFF);
511 memcpy(scb
->ssp_task
.ssp_cmd
.lun
, task
->ssp_task
.LUN
, 8);
512 if (task
->ssp_task
.enable_first_burst
)
513 scb
->ssp_task
.ssp_cmd
.efb_prio_attr
|= EFB_MASK
;
514 scb
->ssp_task
.ssp_cmd
.efb_prio_attr
|= (task
->ssp_task
.task_prio
<< 3);
515 scb
->ssp_task
.ssp_cmd
.efb_prio_attr
|= (task
->ssp_task
.task_attr
& 7);
516 memcpy(scb
->ssp_task
.ssp_cmd
.cdb
, task
->ssp_task
.cdb
, 16);
518 scb
->ssp_task
.sister_scb
= cpu_to_le16(0xFFFF);
519 scb
->ssp_task
.conn_handle
= cpu_to_le16(
520 (u16
)(unsigned long)dev
->lldd_dev
);
521 scb
->ssp_task
.data_dir
= data_dir_flags
[task
->data_dir
];
/*
 * Fix: this was `scb->ssp_task.retry_count = scb->ssp_task.retry_count;`
 * -- a no-op self-assignment.  Copy the caller's retry count from the
 * sas_task instead, matching what asd_build_ata_ascb() does with
 * task->ata_task.retry_count.
 */
522 scb
->ssp_task
.retry_count
= task
->ssp_task
.retry_count
;
524 ascb
->tasklet_complete
= asd_task_tasklet_complete
;
526 res
= asd_map_scatterlist(task
, scb
->ssp_task
.sg_element
, gfp_flags
);
/* Teardown for an SSP ascb: just undo the scatterlist mapping. */
531 static void asd_unbuild_ssp_ascb(struct asd_ascb
*a
)
533 asd_unmap_scatterlist(a
);
536 /* ---------- Execute Task ---------- */
/*
 * asd_can_queue -- try to reserve @num task slots from the sequencer's
 * can_queue budget under pend_q_lock.  On success the counter is
 * decremented; otherwise the result is -SAS_QUEUE_FULL.  Paired with
 * asd_can_dequeue() above, which gives slots back.
 * NOTE(review): chunk appears truncated -- braces, the flags/res
 * declarations, the `else` of the capacity check and the return are not
 * visible here.
 */
538 static inline int asd_can_queue(struct asd_ha_struct
*asd_ha
, int num
)
543 spin_lock_irqsave(&asd_ha
->seq
.pend_q_lock
, flags
);
544 if ((asd_ha
->seq
.can_queue
- num
) < 0)
545 res
= -SAS_QUEUE_FULL
;
547 asd_ha
->seq
.can_queue
-= num
;
548 spin_unlock_irqrestore(&asd_ha
->seq
.pend_q_lock
, flags
);
/*
 * asd_execute_task -- libsas entry point to execute @num linked sas_tasks.
 * Reserves queue slots, allocates a list of ascbs, builds each one
 * according to its task protocol, marks each task AT_INITIATOR, and posts
 * the whole ascb list to the hardware.  On post failure the visible
 * cleanup path unwinds: clears AT_INITIATOR, unbuilds each ascb, frees
 * the list and returns the queue slots.
 * NOTE(review): chunk appears truncated -- gfp_flags parameter, local
 * declarations (res, flags, alist), error-branch conditions, case labels
 * and returns are not visible here.
 */
553 int asd_execute_task(struct sas_task
*task
, const int num
,
558 struct sas_task
*t
= task
;
559 struct asd_ascb
*ascb
= NULL
, *a
;
560 struct asd_ha_struct
*asd_ha
= task
->dev
->port
->ha
->lldd_ha
;
/* Reserve sequencer queue depth for all @num tasks up front. */
563 res
= asd_can_queue(asd_ha
, num
);
568 ascb
= asd_ascb_alloc_list(asd_ha
, &res
, gfp_flags
);
/* Splice the ascb ring onto a local list head for iteration. */
574 __list_add(&alist
, ascb
->list
.prev
, &ascb
->list
);
/* First pass (presumably): pair each ascb with its sas_task. */
575 list_for_each_entry(a
, &alist
, list
) {
578 t
= list_entry(t
->list
.next
, struct sas_task
, list
);
/* Second pass: build each ascb per its task protocol. */
580 list_for_each_entry(a
, &alist
, list
) {
/* Collapse any SATA/STP variant to plain STP for dispatch. */
583 if (t
->task_proto
& SAS_PROTO_STP
)
584 t
->task_proto
= SAS_PROTO_STP
;
585 switch (t
->task_proto
) {
588 res
= asd_build_ata_ascb(a
, t
, gfp_flags
);
591 res
= asd_build_smp_ascb(a
, t
, gfp_flags
);
594 res
= asd_build_ssp_ascb(a
, t
, gfp_flags
);
597 asd_printk("unknown sas_task proto: 0x%x\n",
605 spin_lock_irqsave(&t
->task_state_lock
, flags
);
606 t
->task_state_flags
|= SAS_TASK_AT_INITIATOR
;
607 spin_unlock_irqrestore(&t
->task_state_lock
, flags
);
/* Restore the ascb ring before posting it to the hardware. */
609 list_del_init(&alist
);
611 res
= asd_post_ascb_list(asd_ha
, ascb
, num
);
/* Error path: re-splice the list so built ascbs can be unwound. */
614 __list_add(&alist
, ascb
->list
.prev
, &ascb
->list
);
621 struct asd_ascb
*b
= a
;
622 list_for_each_entry(a
, &alist
, list
) {
626 spin_lock_irqsave(&t
->task_state_lock
, flags
);
627 t
->task_state_flags
&= ~SAS_TASK_AT_INITIATOR
;
628 spin_unlock_irqrestore(&t
->task_state_lock
, flags
);
629 switch (t
->task_proto
) {
632 asd_unbuild_ata_ascb(a
);
635 asd_unbuild_smp_ascb(a
);
638 asd_unbuild_ssp_ascb(a
);
645 list_del_init(&alist
);
648 asd_ascb_free_list(ascb
);
/* Give the reserved queue slots back on failure. */
649 asd_can_dequeue(asd_ha
, num
);