/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}
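
/*
 * Note on tags: mvi->tags is a bitmap with one bit per hardware
 * command slot (mvi->tags_num bits).  mvs_tag_alloc() hands out the
 * lowest free bit and mvs_tag_clear()/mvs_tag_free() return it.  The
 * tag doubles as the index into mvi->slot_info[] and into the TX
 * command headers, so a tag that is never freed keeps its slot
 * unusable until mvs_tag_init() resets the whole map.
 */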

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}
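
/*
 * Note: libsas keeps one flat sas_phy[]/sas_port[] array per host,
 * while each controller instance (mvs_info) owns chip->n_phy
 * consecutive entries.  Dividing the flat phy index by n_phy, as
 * above, therefore recovers the owning mvs_info; the same idiom is
 * used in mvs_phy_control() and mvs_port_notify_formed() below.
 */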

int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been free.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -ENOSYS;
	}
	msleep(200);
	return rc;
}

void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
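
/*
 * Port configuration space is reached indirectly: the register
 * offset is written through write_port_cfg_addr(), then the value
 * through write_port_cfg_data().  The 64-bit SAS address is split
 * into 32-bit halves and written at off_lo/off_hi.
 */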

static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* nothing to do for SATA */
	}
	mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}

int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if REPORT_LUNS request is failed
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}

int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev)) {
		sas_change_queue_depth(sdev,
			MVS_QUEUE_SIZE,
			SCSI_QDEPTH_DEFAULT);
	}
	return 0;
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	scsi_flush_work(shost);
	return 1;
}

static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
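
/*
 * Layout of the MVS_SLOT_BUF_SZ-sized per-slot DMA buffer shared by
 * the mvs_task_prep_*() helpers:
 *
 *   region 1: command table      (MVS_SSP_CMD_SZ / MVS_ATA_CMD_SZ)
 *   region 2: open address frame (MVS_OAF_SZ)
 *   region 3: PRD table          (prd_size() * number of entries)
 *   region 4: status buffer      (the remainder, capped at SB_RFB_MAX)
 *
 * SMP is the odd one out: its request lives in the caller's
 * DMA-mapped sg list, so hdr->cmd_tbl above points at
 * sg_dma_address(sg_req) instead of slot->buf_dma.
 */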

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}
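
/*
 * For NCQ commands, mvs_task_prep_ata() below folds the queue tag
 * returned here into the FIS sector_count field (tag << 3), matching
 * the SATA placement of the NCQ TAG in bits 7:3 of that field.
 */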

static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Have not enough regiset for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}

static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}

#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SATA/STP port %d does not attach"
					"device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SAS port %d does not attach"
				"device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf)
		goto err_out_tag;
	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
{
	struct mvs_task_list *first = NULL;

	for (; *num > 0; --*num) {
		struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);

		if (!mvs_list)
			break;

		INIT_LIST_HEAD(&mvs_list->list);

		if (!first)
			first = mvs_list;
		else
			list_add_tail(&mvs_list->list, &first->list);
	}

	return first;
}

static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}

static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_unlock_irq(dev->sata_dev.ap->lock);

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_lock_irq(dev->sata_dev.ap->lock);

	return rc;
}

static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q, list) {

		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}

int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;

	if (sas->lldd_max_execute_num < 2)
		return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
	else
		return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
}
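
/*
 * Two delivery paths: when lldd_max_execute_num < 2, each sas_task is
 * prepped and delivered individually via mvs_task_exec(); otherwise
 * the collector path walks the whole task list, preps each task under
 * its controller's lock, and rings the TX delivery doorbell once per
 * controller at the end.
 */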

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return (void *)s;
}
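
/*
 * The four PHYR_SATA_SIGn reads reassemble the initial D2H register
 * FIS of a directly attached SATA device.  s[1] == 0x00EB1401 (LBA
 * low/mid/high = 01h/14h/EBh) is the ATAPI device signature; the
 * fixup above appears to normalize the signature bytes for such
 * devices, keeping only the DEV bit (0x10) from the device field.
 */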

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}

static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL; int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;
	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == NO_DEVICE) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("max support %d devices, ignore ..\n",
			MVS_MAX_DEVICES);

	return NULL;
}

void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = NO_DEVICE;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}

int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx"
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	if (mvi_dev) {
		mv_dprintk("found dev[%d:%x] is gone.\n",
			mvi_dev->device_id, mvi_dev->dev_type);
		mvs_release_task(mvi, dev);
		mvs_free_reg_set(mvi, mvi_dev);
		mvs_free_dev(mvi_dev);
	} else {
		mv_dprintk("found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static struct sas_task *mvs_alloc_task(void)
{
	struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);

	if (task) {
		INIT_LIST_HEAD(&task->list);
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
		init_timer(&task->timer);
		init_completion(&task->completion);
	}
	return task;
}

static void mvs_free_task(struct sas_task *task)
{
	if (task) {
		BUG_ON(!list_empty(&task->list));
		kfree(task);
	}
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}

static void mvs_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->completion);
}

#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = mvs_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internel task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				    "status 0x%x\n",
				    SAS_ADDR(dev->sas_addr),
				    task->task_status.resp,
				    task->task_status.stat);
			mvs_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	mvs_free_task(task);
	return res;
}
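
/*
 * Internal TMF protocol: each attempt arms an MVS_TASK_TIMEOUT-second
 * timer whose handler (mvs_tmf_timedout) marks the task aborted and
 * completes it, so wait_for_completion() always returns.  An attempt
 * that timed out (ABORTED set but DONE clear) ends the loop early;
 * otherwise up to three attempts are made before giving up.
 */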

static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}

/*  Standard mandates link reset for ATA  (type 0)
    and hard reset for SSP (type 1) , only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_find_local_phy(dev);
	int reset_type = (dev->dev_type == SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	msleep(2000);
	return rc;
}

/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device * mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall-through I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

/*  mandatory SAM-3, still need free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards.*/
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			mvs_tmf_timedout((unsigned long)task);
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}

void mvs_set_sense(u8 *buffer, int len, int d_sense,
		int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
			return;
		}

		buffer[0] = 0x72;		/* Response Code	*/
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC	*/
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ	*/
	} else {
		if (len < 14) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
			return;
		}

		buffer[0] = 0x70;		/* Response Code	*/
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}

	return;
}
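
/*
 * d_sense selects between the two SPC sense data formats: descriptor
 * format (response code 0x72; key/ASC/ASCQ in bytes 1-3) and fixed
 * format (response code 0x70; key in byte 2, additional sense length
 * in byte 7, ASC/ASCQ in bytes 12-13).
 */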

void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
				u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 02;
	mvs_set_sense(iu->sense_data, 17, 0,
			key, asc, asc_q);
}
*mvi
, struct sas_task
*task
,
1802 struct mvs_slot_info
*slot
= &mvi
->slot_info
[slot_idx
];
1804 u32 err_dw0
= le32_to_cpu(*(u32
*)slot
->response
);
1805 u32 err_dw1
= le32_to_cpu(*((u32
*)slot
->response
+ 1));
1807 enum mvs_port_type type
= PORT_TYPE_SAS
;
1809 if (err_dw0
& CMD_ISS_STPD
)
1810 MVS_CHIP_DISP
->issue_stop(mvi
, type
, tfs
);
1812 MVS_CHIP_DISP
->command_active(mvi
, slot_idx
);
1814 stat
= SAM_STAT_CHECK_CONDITION
;
1815 switch (task
->task_proto
) {
1816 case SAS_PROTOCOL_SSP
:
1818 stat
= SAS_ABORTED_TASK
;
1819 if ((err_dw0
& NO_DEST
) || err_dw1
& bit(31)) {
1820 struct ssp_response_iu
*iu
= slot
->response
+
1821 sizeof(struct mvs_err_info
);
1822 mvs_fill_ssp_resp_iu(iu
, NOT_READY
, 0x04, 01);
1823 sas_ssp_task_response(mvi
->dev
, task
, iu
);
1824 stat
= SAM_STAT_CHECK_CONDITION
;
1826 if (err_dw1
& bit(31))
1827 mv_printk("reuse same slot, retry command.\n");
1830 case SAS_PROTOCOL_SMP
:
1831 stat
= SAM_STAT_CHECK_CONDITION
;
1834 case SAS_PROTOCOL_SATA
:
1835 case SAS_PROTOCOL_STP
:
1836 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
1838 task
->ata_task
.use_ncq
= 0;
1839 stat
= SAS_PROTO_RESPONSE
;
1840 mvs_sata_done(mvi
, task
, slot_idx
, err_dw0
);
1850 int mvs_slot_complete(struct mvs_info
*mvi
, u32 rx_desc
, u32 flags
)
1852 u32 slot_idx
= rx_desc
& RXQ_SLOT_MASK
;
1853 struct mvs_slot_info
*slot
= &mvi
->slot_info
[slot_idx
];
1854 struct sas_task
*task
= slot
->task
;
1855 struct mvs_device
*mvi_dev
= NULL
;
1856 struct task_status_struct
*tstat
;
1857 struct domain_device
*dev
;
1861 enum exec_status sts
;
1863 if (unlikely(!task
|| !task
->lldd_task
|| !task
->dev
))
1866 tstat
= &task
->task_status
;
1868 mvi_dev
= dev
->lldd_dev
;
1870 spin_lock(&task
->task_state_lock
);
1871 task
->task_state_flags
&=
1872 ~(SAS_TASK_STATE_PENDING
| SAS_TASK_AT_INITIATOR
);
1873 task
->task_state_flags
|= SAS_TASK_STATE_DONE
;
1875 aborted
= task
->task_state_flags
& SAS_TASK_STATE_ABORTED
;
1876 spin_unlock(&task
->task_state_lock
);
1878 memset(tstat
, 0, sizeof(*tstat
));
1879 tstat
->resp
= SAS_TASK_COMPLETE
;
1881 if (unlikely(aborted
)) {
1882 tstat
->stat
= SAS_ABORTED_TASK
;
1883 if (mvi_dev
&& mvi_dev
->running_req
)
1884 mvi_dev
->running_req
--;
1885 if (sas_protocol_ata(task
->task_proto
))
1886 mvs_free_reg_set(mvi
, mvi_dev
);
1888 mvs_slot_task_free(mvi
, task
, slot
, slot_idx
);
1892 /* when no device attaching, go ahead and complete by error handling*/
1893 if (unlikely(!mvi_dev
|| flags
)) {
1895 mv_dprintk("port has not device.\n");
1896 tstat
->stat
= SAS_PHY_DOWN
;
1900 /* error info record present */
1901 if (unlikely((rx_desc
& RXQ_ERR
) && (*(u64
*) slot
->response
))) {
1902 mv_dprintk("port %d slot %d rx_desc %X has error info"
1903 "%016llX.\n", slot
->port
->sas_port
.id
, slot_idx
,
1904 rx_desc
, (u64
)(*(u64
*)slot
->response
));
1905 tstat
->stat
= mvs_slot_err(mvi
, task
, slot_idx
);
1906 tstat
->resp
= SAS_TASK_COMPLETE
;
1910 switch (task
->task_proto
) {
1911 case SAS_PROTOCOL_SSP
:
1912 /* hw says status == 0, datapres == 0 */
1913 if (rx_desc
& RXQ_GOOD
) {
1914 tstat
->stat
= SAM_STAT_GOOD
;
1915 tstat
->resp
= SAS_TASK_COMPLETE
;
1917 /* response frame present */
1918 else if (rx_desc
& RXQ_RSP
) {
1919 struct ssp_response_iu
*iu
= slot
->response
+
1920 sizeof(struct mvs_err_info
);
1921 sas_ssp_task_response(mvi
->dev
, task
, iu
);
1923 tstat
->stat
= SAM_STAT_CHECK_CONDITION
;
1926 case SAS_PROTOCOL_SMP
: {
1927 struct scatterlist
*sg_resp
= &task
->smp_task
.smp_resp
;
1928 tstat
->stat
= SAM_STAT_GOOD
;
1929 to
= kmap_atomic(sg_page(sg_resp
), KM_IRQ0
);
1930 memcpy(to
+ sg_resp
->offset
,
1931 slot
->response
+ sizeof(struct mvs_err_info
),
1932 sg_dma_len(sg_resp
));
1933 kunmap_atomic(to
, KM_IRQ0
);
1937 case SAS_PROTOCOL_SATA
:
1938 case SAS_PROTOCOL_STP
:
1939 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
: {
1940 tstat
->stat
= mvs_sata_done(mvi
, task
, slot_idx
, 0);
1945 tstat
->stat
= SAM_STAT_CHECK_CONDITION
;
1948 if (!slot
->port
->port_attached
) {
1949 mv_dprintk("port %d has removed.\n", slot
->port
->sas_port
.id
);
1950 tstat
->stat
= SAS_PHY_DOWN
;
1955 if (mvi_dev
&& mvi_dev
->running_req
) {
1956 mvi_dev
->running_req
--;
1957 if (sas_protocol_ata(task
->task_proto
) && !mvi_dev
->running_req
)
1958 mvs_free_reg_set(mvi
, mvi_dev
);
1960 mvs_slot_task_free(mvi
, task
, slot
, slot_idx
);
1963 spin_unlock(&mvi
->lock
);
1964 if (task
->task_done
)
1965 task
->task_done(task
);
1967 spin_lock(&mvi
->lock
);
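
/*
 * Note: mvs_slot_complete() is entered with mvi->lock held by the
 * caller (mvs_int_rx() or mvs_do_release_task()).  The lock is
 * dropped around task->task_done() above, presumably so the
 * libsas/SCSI completion path can re-enter the driver without
 * deadlocking on mvi->lock.
 */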

void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean cmpl queue in case request is already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;
	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {

		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;
			struct sas_identify_frame *id;
			id = (struct sas_identify_frame *)phy->frame_rcvd;
			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_ha->notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}

static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

static void mvs_sig_time_out(unsigned long tphy)
{
	struct mvs_phy *phy = (struct mvs_phy *)tphy;
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}

void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * events is port event now ,
	 * we need check the interrupt status which belongs to per port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
		phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}

int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}