/*
 * Aic94xx SAS/SATA Tasks
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

static void asd_unbuild_ata_ascb(struct asd_ascb *a);
static void asd_unbuild_smp_ascb(struct asd_ascb *a);
static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
{
        unsigned long flags;

        spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
        asd_ha->seq.can_queue += num;
        spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
}

/* DMA_... to our direction translation.
 */
static const u8 data_dir_flags[] = {
        [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
        [DMA_TO_DEVICE]     = DATA_DIR_OUT,         /* OUTBOUND */
        [DMA_FROM_DEVICE]   = DATA_DIR_IN,          /* INBOUND */
        [DMA_NONE]          = DATA_DIR_NONE,        /* NO TRANSFER */
};
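
/* Map the task's data buffers into the SCB's embedded SG elements.  Up to
 * three elements fit in the SCB itself; longer lists go into a coherent
 * allocation that the third embedded element points at.  STP/SATA tasks
 * arrive from libata with the SG list already DMA-mapped.
 */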
static int asd_map_scatterlist(struct sas_task *task,
                               struct sg_el *sg_arr,
                               gfp_t gfp_flags)
{
        struct asd_ascb *ascb = task->lldd_task;
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct scatterlist *sc;
        int num_sg, res;

        if (task->data_dir == DMA_NONE)
                return 0;

        if (task->num_scatter == 0) {
                void *p = task->scatter;
                dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
                                                task->total_xfer_len,
                                                task->data_dir);
                sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
                sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
                sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
                return 0;
        }

        /* STP tasks come from libata which has already mapped
         * the SG list */
        if (sas_protocol_ata(task->task_proto))
                num_sg = task->num_scatter;
        else
                num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
                                    task->num_scatter, task->data_dir);
        if (num_sg == 0)
                return -ENOMEM;

        if (num_sg > 3) {
                int i;

                ascb->sg_arr = asd_alloc_coherent(asd_ha,
                                                  num_sg*sizeof(struct sg_el),
                                                  gfp_flags);
                if (!ascb->sg_arr) {
                        res = -ENOMEM;
                        goto err_unmap;
                }
                for_each_sg(task->scatter, sc, num_sg, i) {
                        struct sg_el *sg =
                                &((struct sg_el *)ascb->sg_arr->vaddr)[i];
                        sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
                        sg->size = cpu_to_le32((u32)sg_dma_len(sc));
                        if (i == num_sg-1)
                                sg->flags |= ASD_SG_EL_LIST_EOL;
                }

                for_each_sg(task->scatter, sc, 2, i) {
                        sg_arr[i].bus_addr =
                                cpu_to_le64((u64)sg_dma_address(sc));
                        sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
                }
                sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
                sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;

                memset(&sg_arr[2], 0, sizeof(*sg_arr));
                sg_arr[2].bus_addr = cpu_to_le64((u64)ascb->sg_arr->dma_handle);
        } else {
                int i;
                for_each_sg(task->scatter, sc, num_sg, i) {
                        sg_arr[i].bus_addr =
                                cpu_to_le64((u64)sg_dma_address(sc));
                        sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
                }
                sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
        }

        return 0;
err_unmap:
        if (sas_protocol_ata(task->task_proto))
                dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
                             task->num_scatter, task->data_dir);
        return res;
}
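
/* Undo asd_map_scatterlist(): free any external SG list and unmap the
 * buffers that this driver (rather than libata) mapped.
 */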
static void asd_unmap_scatterlist(struct asd_ascb *ascb)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct sas_task *task = ascb->uldd_task;

        if (task->data_dir == DMA_NONE)
                return;

        if (task->num_scatter == 0) {
                dma_addr_t dma = (dma_addr_t)
                        le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
                dma_unmap_single(&ascb->ha->pcidev->dev, dma,
                                 task->total_xfer_len, task->data_dir);
                return;
        }

        asd_free_coherent(asd_ha, ascb->sg_arr);
        if (task->task_proto != SAS_PROTOCOL_STP)
                dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
                             task->num_scatter, task->data_dir);
}

/* ---------- Task complete tasklet ---------- */
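
/* Copy the response frame for an SSP or ATA task out of the empty data
 * buffer (EDB) referenced by the done-list status block, fill in the
 * task status, and invalidate the EDB for reuse.
 */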
static void asd_get_response_tasklet(struct asd_ascb *ascb,
                                     struct done_list_struct *dl)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct sas_task *task = ascb->uldd_task;
        struct task_status_struct *ts = &task->task_status;
        unsigned long flags;
        struct tc_resp_sb_struct {
                __le16 index_escb;
                u8     len_lsb;
                u8     flags;
        } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

        /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
        int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
        struct asd_ascb *escb;
        struct asd_dma_tok *edb;
        void *r;

        spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
        escb = asd_tc_index_find(&asd_ha->seq,
                                 (int)le16_to_cpu(resp_sb->index_escb));
        spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

        if (!escb) {
                ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
                return;
        }

        ts->buf_valid_size = 0;
        edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
        r = edb->vaddr;
        if (task->task_proto == SAS_PROTOCOL_SSP) {
                struct ssp_response_iu *iu =
                        r + 16 + sizeof(struct ssp_frame_hdr);

                ts->residual = le32_to_cpu(*(__le32 *)r);

                sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
        } else {
                struct ata_task_resp *resp = (void *) &ts->buf[0];

                ts->residual = le32_to_cpu(*(__le32 *)r);

                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
                        memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }

        asd_invalidate_edb(escb, edb_id);
}
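
/* Completion handler for task SCBs: translate the done-list opcode into
 * libsas resp/stat codes, unbuild the ascb for the task's protocol, and
 * hand the task back via task->task_done() unless the upper layer has
 * already aborted it.
 */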
static void asd_task_tasklet_complete(struct asd_ascb *ascb,
                                      struct done_list_struct *dl)
{
        struct sas_task *task = ascb->uldd_task;
        struct task_status_struct *ts = &task->task_status;
        unsigned long flags;
        u8 opcode = dl->opcode;

        asd_can_dequeue(ascb->ha, 1);

Again:
        switch (opcode) {
        case TC_NO_ERROR:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAM_STAT_GOOD;
                break;
        case TC_UNDERRUN:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_DATA_UNDERRUN;
                ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
                break;
        case TC_OVERRUN:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_DATA_OVERRUN;
                ts->residual = 0;
                break;
        case TC_SSP_RESP:
        case TC_ATA_RESP:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_PROTO_RESPONSE;
                asd_get_response_tasklet(ascb, dl);
                break;
        case TF_OPEN_REJECT:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_OPEN_REJECT;
                if (dl->status_block[1] & 2)
                        ts->open_rej_reason = 1 + dl->status_block[2];
                else if (dl->status_block[1] & 1)
                        ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
                else
                        ts->open_rej_reason = SAS_OREJ_UNKNOWN;
                break;
        case TF_OPEN_TO:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_OPEN_TO;
                break;
        case TF_PHY_DOWN:
        case TU_PHY_DOWN:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_PHY_DOWN;
                break;
        case TI_PHY_DOWN:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_PHY_DOWN;
                break;
        case TI_BREAK:
        case TI_PROTO_ERR:
        case TI_NAK:
        case TI_ACK_NAK_TO:
        case TF_SMP_XMIT_RCV_ERR:
        case TC_ATA_R_ERR_RECV:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_INTERRUPTED;
                break;
        case TF_BREAK:
        case TU_BREAK:
        case TU_ACK_NAK_TO:
        case TF_SMPRSP_TO:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_DEV_NO_RESPONSE;
                break;
        case TF_NAK_RECV:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_NAK_R_ERR;
                break;
        case TA_I_T_NEXUS_LOSS:
                opcode = dl->status_block[0];
                goto Again;
                break;
        case TF_INV_CONN_HANDLE:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_DEVICE_UNKNOWN;
                break;
        case TF_REQUESTED_N_PENDING:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_PENDING;
                break;
        case TC_TASK_CLEARED:
        case TA_ON_REQ:
                ts->resp = SAS_TASK_COMPLETE;
                ts->stat = SAS_ABORTED_TASK;
                break;

        case TF_NO_SMP_CONN:
        case TF_TMF_NO_CTX:
        case TF_TMF_NO_TAG:
        case TF_TMF_TAG_FREE:
        case TF_TMF_TASK_DONE:
        case TF_TMF_NO_CONN_HANDLE:
        case TF_IRTT_TO:
        case TF_IU_SHORT:
        case TF_DATA_OFFS_ERR:
                ts->resp = SAS_TASK_UNDELIVERED;
                ts->stat = SAS_DEV_NO_RESPONSE;
                break;

        case TC_LINK_ADM_RESP:
        case TC_CONTROL_PHY:
        case TC_RESUME:
        case TC_PARTIAL_SG_LIST:
        default:
                ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
                break;
        }

        switch (task->task_proto) {
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
                asd_unbuild_ata_ascb(ascb);
                break;
        case SAS_PROTOCOL_SMP:
                asd_unbuild_smp_ascb(ascb);
                break;
        case SAS_PROTOCOL_SSP:
                asd_unbuild_ssp_ascb(ascb);
        default:
                break;
        }

        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
        task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
                struct completion *completion = ascb->completion;
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
                            "stat 0x%x but aborted by upper layer!\n",
                            task, opcode, ts->resp, ts->stat);
                if (completion)
                        complete(completion);
        } else {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                task->lldd_task = NULL;
                asd_ascb_free(ascb);
                mb();
                task->task_done(task);
        }
}

/* ---------- ATA ---------- */
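
/* Build an ATA/ATAPI task SCB (or a CONTROL_ATA_DEV SCB for device
 * control register updates) from the sas_task and map its data buffers.
 */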
static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
                              gfp_t gfp_flags)
{
        struct domain_device *dev = task->dev;
        struct scb *scb;
        u8 flags;
        int res = 0;

        scb = ascb->scb;

        if (unlikely(task->ata_task.device_control_reg_update))
                scb->header.opcode = CONTROL_ATA_DEV;
        else if (dev->sata_dev.class == ATA_DEV_ATAPI)
                scb->header.opcode = INITIATE_ATAPI_TASK;
        else
                scb->header.opcode = INITIATE_ATA_TASK;

        scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
        if (dev->port->oob_mode == SAS_OOB_MODE)
                scb->ata_task.proto_conn_rate |= dev->linkrate;

        scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
        scb->ata_task.fis = task->ata_task.fis;
        if (likely(!task->ata_task.device_control_reg_update))
                scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
        scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
        if (dev->sata_dev.class == ATA_DEV_ATAPI)
                memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
                       16);
        scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->ata_task.conn_handle = cpu_to_le16(
                (u16)(unsigned long)dev->lldd_dev);

        if (likely(!task->ata_task.device_control_reg_update)) {
                flags = 0;
                if (task->ata_task.dma_xfer)
                        flags |= DATA_XFER_MODE_DMA;
                if (task->ata_task.use_ncq &&
                    dev->sata_dev.class != ATA_DEV_ATAPI)
                        flags |= ATA_Q_TYPE_NCQ;
                flags |= data_dir_flags[task->data_dir];
                scb->ata_task.ata_flags = flags;

                scb->ata_task.retry_count = task->ata_task.retry_count;

                flags = 0;
                if (task->ata_task.set_affil_pol)
                        flags |= SET_AFFIL_POLICY;
                if (task->ata_task.stp_affil_pol)
                        flags |= STP_AFFIL_POLICY;
                scb->ata_task.flags = flags;
        }
        ascb->tasklet_complete = asd_task_tasklet_complete;

        if (likely(!task->ata_task.device_control_reg_update))
                res = asd_map_scatterlist(task, scb->ata_task.sg_element,
                                          gfp_flags);

        return res;
}

static void asd_unbuild_ata_ascb(struct asd_ascb *a)
{
        asd_unmap_scatterlist(a);
}

/* ---------- SMP ---------- */
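
/* Build an INITIATE_SMP_TASK SCB.  The request and response are single
 * scatterlist entries mapped here; the sizes passed to the sequencer are
 * 4 bytes short of the mapped lengths (the trailing CRC dword).
 */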
static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
                              gfp_t gfp_flags)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct domain_device *dev = task->dev;
        struct scb *scb;

        dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
                   DMA_TO_DEVICE);
        dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
                   DMA_FROM_DEVICE);

        scb = ascb->scb;

        scb->header.opcode = INITIATE_SMP_TASK;

        scb->smp_task.proto_conn_rate = dev->linkrate;

        scb->smp_task.smp_req.bus_addr =
                cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
        scb->smp_task.smp_req.size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);

        scb->smp_task.smp_resp.bus_addr =
                cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
        scb->smp_task.smp_resp.size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);

        scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->smp_task.conn_handle = cpu_to_le16((u16)
                                                (unsigned long)dev->lldd_dev);

        ascb->tasklet_complete = asd_task_tasklet_complete;

        return 0;
}

static void asd_unbuild_smp_ascb(struct asd_ascb *a)
{
        struct sas_task *task = a->uldd_task;

        BUG_ON(!task);
        dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
                     DMA_TO_DEVICE);
        dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
                     DMA_FROM_DEVICE);
}

/* ---------- SSP ---------- */
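
/* Build an INITIATE_SSP_TASK SCB: hashed SAS addresses for the OPEN
 * frame, the SSP command IU (LUN, task attributes, CDB) and the data
 * SG list.
 */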
static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
                              gfp_t gfp_flags)
{
        struct domain_device *dev = task->dev;
        struct scb *scb;
        int res = 0;

        scb = ascb->scb;

        scb->header.opcode = INITIATE_SSP_TASK;

        scb->ssp_task.proto_conn_rate  = (1 << 4); /* SSP */
        scb->ssp_task.proto_conn_rate |= dev->linkrate;
        scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
        scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
        memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
               HASHED_SAS_ADDR_SIZE);
        memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
               dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

        memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
        if (task->ssp_task.enable_first_burst)
                scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
        scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
        scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
        memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
               task->ssp_task.cmd->cmd_len);

        scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->ssp_task.conn_handle = cpu_to_le16(
                (u16)(unsigned long)dev->lldd_dev);
        scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
        scb->ssp_task.retry_count = scb->ssp_task.retry_count;

        ascb->tasklet_complete = asd_task_tasklet_complete;

        res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);

        return res;
}

static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
{
        asd_unmap_scatterlist(a);
}

/* ---------- Execute Task ---------- */
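
/* Reserve 'num' slots in the sequencer's pending queue, or fail with
 * -SAS_QUEUE_FULL.  Balanced by asd_can_dequeue() on completion or error.
 */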
static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
{
        int res = 0;
        unsigned long flags;

        spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
        if ((asd_ha->seq.can_queue - num) < 0)
                res = -SAS_QUEUE_FULL;
        else
                asd_ha->seq.can_queue -= num;
        spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);

        return res;
}
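
/* libsas execution entry point: reserve a queue slot, allocate an ascb,
 * build it according to the task's protocol and post it to the sequencer.
 * On failure the ascb is unbuilt and freed and the slot is given back.
 */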
int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
        int res = 0;
        LIST_HEAD(alist);
        struct sas_task *t = task;
        struct asd_ascb *ascb = NULL, *a;
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        unsigned long flags;

        res = asd_can_queue(asd_ha, 1);
        if (res)
                return res;

        res = 1;
        ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
        if (res) {
                res = -ENOMEM;
                goto out_err;
        }

        __list_add(&alist, ascb->list.prev, &ascb->list);
        list_for_each_entry(a, &alist, list) {
                a->uldd_task = t;
                t->lldd_task = a;
                break;
        }
        list_for_each_entry(a, &alist, list) {
                t = a->uldd_task;
                a->uldd_timer = 1;
                if (t->task_proto & SAS_PROTOCOL_STP)
                        t->task_proto = SAS_PROTOCOL_STP;
                switch (t->task_proto) {
                case SAS_PROTOCOL_SATA:
                case SAS_PROTOCOL_STP:
                        res = asd_build_ata_ascb(a, t, gfp_flags);
                        break;
                case SAS_PROTOCOL_SMP:
                        res = asd_build_smp_ascb(a, t, gfp_flags);
                        break;
                case SAS_PROTOCOL_SSP:
                        res = asd_build_ssp_ascb(a, t, gfp_flags);
                        break;
                default:
                        asd_printk("unknown sas_task proto: 0x%x\n",
                                   t->task_proto);
                        res = -ENOMEM;
                        break;
                }
                if (res)
                        goto out_err_unmap;

                spin_lock_irqsave(&t->task_state_lock, flags);
                t->task_state_flags |= SAS_TASK_AT_INITIATOR;
                spin_unlock_irqrestore(&t->task_state_lock, flags);
        }
        list_del_init(&alist);

        res = asd_post_ascb_list(asd_ha, ascb, 1);
        if (unlikely(res)) {
                __list_add(&alist, ascb->list.prev, &ascb->list);
                goto out_err_unmap;
        }

        return 0;
out_err_unmap:
        {
                struct asd_ascb *b = a;
                list_for_each_entry(a, &alist, list) {
                        if (a == b)
                                break;
                        t = a->uldd_task;
                        spin_lock_irqsave(&t->task_state_lock, flags);
                        t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
                        spin_unlock_irqrestore(&t->task_state_lock, flags);
                        switch (t->task_proto) {
                        case SAS_PROTOCOL_SATA:
                        case SAS_PROTOCOL_STP:
                                asd_unbuild_ata_ascb(a);
                                break;
                        case SAS_PROTOCOL_SMP:
                                asd_unbuild_smp_ascb(a);
                                break;
                        case SAS_PROTOCOL_SSP:
                                asd_unbuild_ssp_ascb(a);
                        default:
                                break;
                        }
                        t->lldd_task = NULL;
                }
        }
        list_del_init(&alist);
out_err:
        if (ascb)
                asd_ascb_free_list(ascb);
        asd_can_dequeue(asd_ha, 1);
        return res;
}