/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"
static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = &mvi->tags;
	clear_bit(tag, bitmap);
}
void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = &mvi->tags;
	set_bit(tag, bitmap);
}
inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = &mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}
void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}
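
/*
 * Illustrative sketch (not part of the driver source): how a caller is
 * expected to drive the tag helpers above.  The slot-table index and the
 * hardware tag are the same number, so a successful mvs_tag_alloc() both
 * reserves a bit in mvi->tags and names the slot to program.
 */
#if 0	/* example only, compiled out */
static int example_reserve_slot(struct mvs_info *mvi)
{
	u32 tag;

	if (mvs_tag_alloc(mvi, &tag))	/* non-zero means no free bit */
		return -1;
	/* ... program mvi->slot_info[tag] here ... */
	mvs_tag_free(mvi, tag);		/* releases the bit via mvs_tag_clear() */
	return 0;
}
#endif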
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 offset = 0;
	u32 run;
	u32 i;

	while (size) {
		printk(KERN_DEBUG "%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk(KERN_DEBUG "%02X ", (u32)data[i]);
			else
				printk(KERN_DEBUG "  ");
		}
		printk(KERN_DEBUG ": ");
		for (i = 0; i < run; i++)
			printk(KERN_DEBUG "%c",
				isalnum(data[i]) ? data[i] : '.');
		printk(KERN_DEBUG "\n");
		data = &data[16];
		offset += run;
	}
	printk(KERN_DEBUG "\n");
}
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
		 MVS_CHIP_DISP->prd_size() * slot->n_elem;
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
	u32 sz, w_ptr;
	u64 addr;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* Delivery Queue */
	sz = MVS_CHIP_SLOT_SZ;
	w_ptr = slot->tx;
	addr = mvi->tx_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, (unsigned long long)mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/* Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, (unsigned long long)slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
	/* mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/* 1. command table area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/* 2. open address frame area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/* 3. status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/* 4. PRD table */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
	mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
}
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
	u64 addr;
	void __iomem *regs = mvi->regs;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/* Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
}
void mvs_get_sas_addr(void *buf, u32 buflen)
{
	/*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
}
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}
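
/*
 * A worked example of the index arithmetic above (assumed values, not
 * from the source): with two 8-phy controllers registered under one
 * sas_ha_struct, n_phy is 8, so libsas phy 11 maps to hi = 11 / 8 = 1,
 * i.e. the second mvs_info in ((struct mvs_prv_info *)sha->lldd_ha)->mvi[].
 */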
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}
static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has already been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}
static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}
/* FIXME: locking? */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(200);
	return rc;
}
void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr >> 32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
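
/*
 * Worked example for the split above (example address, not from the
 * source): sas_addr = 0x5005043011ab6440 gives lo = 0x11ab6440 and
 * hi = 0x50050430; each half is then written through the port
 * configuration window as a separate 32-bit register access.
 */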
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* nothing to do for SATA */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}
int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if the REPORT_LUNS request fails.
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}
int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
		/* may set PIO mode */
#if MV_DISABLE_NCQ
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;
		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
#endif
	}
	return 0;
}
void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
}
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
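
/*
 * Sketch of the hdr->lens packing used above (my reading of the code,
 * not an authoritative register description): bits 31:16 hold the
 * response length in dwords and bits 15:0 the request length in dwords,
 * with the 4-byte CRC stripped from the SMP request, hence (req_len - 4).
 * For example, a 40-byte request and a 256-byte response region would
 * encode as (64 << 16) | 9.
 */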
static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("not enough register sets for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#else
	flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#endif
	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low-order 5 bits carry the TAG if NCQ is enabled */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	slot->cmd_size = MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	/* FIXME: probably unused, for SATA.  kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		   sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
#endif
	return 0;
}
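
/*
 * The per-slot DMA buffer carved up above, as I read the region
 * arithmetic (illustrative, not a hardware manual):
 *
 *   region 1: command table      (MVS_ATA_CMD_SZ bytes, the host-to-dev FIS)
 *   region 2: open address frame (MVS_OAF_SZ bytes)
 *   region 3: PRD table          (prd_size() * prd_count() bytes)
 *   region 4: status buffer      (whatever of MVS_SLOT_BUF_SZ remains)
 *
 * buf_tmp and buf_tmp_dma advance through the regions in lock-step so
 * the CPU-visible pointer and the bus address always name the same byte.
 */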
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	slot->cmd_size = MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
		   sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
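
/*
 * Byte layout of the 12-byte open address frame header built above,
 * as the code applies the SAS spec (for orientation, not a normative
 * reference):
 *
 *   byte 0     : (1 << 7) initiator | protocol << 4 | address frame type 1h
 *   byte 1     : connection rate (dev->linkrate & 0xf)
 *   bytes 2-3  : initiator connection tag (0xFFFF for SMP,
 *                device_id + 1 for SSP/STP here)
 *   bytes 4-11 : destination SAS address
 */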
#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi,
				int is_tmf, struct mvs_tmf_task *tmf,
				int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; we should
		 * not call task_done for SATA.
		 */
		if (dev->dev_type != SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				   mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				   SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SATA/STP port %d does not attach "
					"device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SAS port %d does not attach "
				"device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf)
		goto err_out_tag;
	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvs_hba_memory_dump(mvi, tag, task->task_proto);
	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
{
	struct mvs_task_list *first = NULL;

	for (; *num > 0; --*num) {
		struct mvs_task_list *mvs_list =
			kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);

		if (!mvs_list)
			break;

		INIT_LIST_HEAD(&mvs_list->list);
		if (!first)
			first = mvs_list;
		else
			list_add_tail(&mvs_list->list, &first->list);
	}

	return first;
}
static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_unlock_irq(dev->sata_dev.ap->lock);

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_lock_irq(dev->sata_dev.ap->lock);

	return rc;
}
static int mvs_collector_task_exec(struct sas_task *task, const int num,
				gfp_t gfp_flags, struct completion *completion,
				int is_tmf, struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q, list) {
		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev,
				"mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}
int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;

	if (sas->lldd_max_execute_num < 2)
		return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
	else
		return mvs_collector_task_exec(task, num, gfp_flags,
						NULL, 0, NULL);
}
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}
static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
	}
	phy->phy_attached = 0;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	return 0;
}
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	/* Workaround: take some ATAPI devices for ATA */
	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return (void *)s;
}
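
/*
 * My reading of the workaround above (not vendor documentation): if the
 * low 24 bits of s[1] carry the ATAPI signature 0x00EB1401 and the first
 * byte of s[3] is 0x01, the top byte of s[1] is masked down to bit 4
 * only, which is what the source comment means by taking some ATAPI
 * devices for ATA.
 */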
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				MVS_CHIP_DISP->phy_reset(mvi, i, 0);
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
				sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("port %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("port %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL; int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;
	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (sas_port->id >= mvi->chip->n_phy)
		port = &mvi->port[sas_port->id - mvi->chip->n_phy];
	else
		port = &mvi->port[sas_port->id];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}
static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, NULL);
}
void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}
struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == NO_DEVICE) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("at most %d devices are supported, ignoring.\n",
			MVS_MAX_DEVICES);

	return NULL;
}

void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = NO_DEVICE;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}
int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				" at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}
int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}
void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	if (mvi_dev) {
		mv_dprintk("found dev[%d:%x] is gone.\n",
			mvi_dev->device_id, mvi_dev->dev_type);
		mvs_release_task(mvi, dev);
		mvs_free_reg_set(mvi, mvi_dev);
		mvs_free_dev(mvi_dev);
	} else {
		mv_dprintk("the device is already gone.\n");
	}
	dev->lldd_dev = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}
void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}
static struct sas_task *mvs_alloc_task(void)
{
	struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);

	if (task) {
		INIT_LIST_HEAD(&task->list);
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
		init_timer(&task->timer);
		init_completion(&task->completion);
	}
	return task;
}
static void mvs_free_task(struct sas_task *task)
{
	if (task) {
		BUG_ON(!list_empty(&task->list));
		kfree(task);
	}
}
static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}
static void mvs_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->completion);
}
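
/*
 * How the timer and completion cooperate (a sketch of the flow as I
 * read it, not new behavior): mvs_exec_internal_tmf_task() below arms
 * task->timer with mvs_tmf_timedout() before issuing the TMF.  Whichever
 * fires first wins: the completion path runs mvs_task_done(), which only
 * completes if it can still del_timer(); the timeout marks
 * SAS_TASK_STATE_ABORTED before completing, so the waiter can tell the
 * two outcomes apart.
 */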
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = mvs_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internal task failed: %d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				   "status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			mvs_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	mvs_free_task(task);
	return res;
}
static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	strncpy((u8 *)&ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}
/* The standard mandates link reset for ATA (type 0)
 * and hard reset for SSP (type 1), only for RECOVERY. */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_find_local_phy(dev);
	int reset_type = (dev->dev_type == SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	msleep(2000);
	return rc;
}
/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		num = mvs_find_dev_phyno(dev, phyno);
		spin_lock_irqsave(&mvi->lock, flags);
		for (i = 0; i < num; i++)
			mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If it failed, fall through to the I_T nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}
int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	/* housekeeping */
	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}
/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LU, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LU or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_COMPLETE;
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
/* mandatory SAM-3; the task/slot info still needs to be freed */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards. */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		/* TODO: free the register set */
		if (SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			struct task_status_struct *tstat;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			tstat = &task->task_status;
			mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			tstat->stat = SAS_ABORTED_TASK;
			if (mvi_dev && mvi_dev->running_req)
				mvi_dev->running_req--;
			if (sas_protocol_ata(task->task_proto))
				mvs_free_reg_set(mvi, mvi_dev);
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			return -1;
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}
int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}
int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		stat = SAS_ABORTED_TASK;
		break;
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		if (err_dw0 == 0x80400002)
			mv_printk("found a reserved error code, why?\n");

		task->ata_task.use_ncq = 0;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;
	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	mvs_hba_cq_dump(mvi);

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no attached device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n",
			slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) &&
				!mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	else
		mv_dprintk("task has no task_done callback.\n");
	spin_lock(&mvi->lock);

	return sts;
}
void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean the completion queue in case the request already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}
void mvs_release_task(struct mvs_info *mvi,
		struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;

	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}
static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		u32 phy_no = (unsigned long) mwq->data;
		struct sas_ha_struct *sas_ha = mvi->sas;
		struct mvs_phy *phy = &mvi->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;
			struct sas_identify_frame *id;
			id = (struct sas_identify_frame *)phy->frame_rcvd;
			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}
static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	int ret = 0;
	struct mvs_wq *mwq;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}
static void mvs_sig_time_out(unsigned long tphy)
{
	struct mvs_phy *phy = (struct mvs_phy *)tphy;
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
		}
	}
}
static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * events now carries the port event; we need to check the
	 * interrupt status that belongs to each port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("port %d STP decoding error.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (!ready)
				mv_dprintk("phy%d Unplug Notice\n",
					phy_no +
					mvi->id * mvi->chip->n_phy);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, 0);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 10*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mvs_sig_remove_timer(phy);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether the driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("port %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		/* exception for Samsung disk drive */
		mdelay(1000);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	}
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come in late; read from the register and try again.
	 * Note: if coalescing is enabled,
	 * it will need to read from the register every time for sure.
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}