/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"

/**
 * pm8001_find_tag - find the tag assigned to a given sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag associated with the task
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->rsvd_tags;
	unsigned long flags;
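
	/*
	 * Only reserved tags are tracked in the rsvd_tags bitmap; regular
	 * command tags are owned by the block layer tagset and are not
	 * freed here.
	 */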
	if (tag >= PM8001_RESERVE_SLOT)
		return;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	__clear_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}

/**
 * pm8001_tag_alloc - allocate an empty tag for a task
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag
 */
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	void *bitmap = pm8001_ha->rsvd_tags;
	unsigned long flags;
	unsigned int tag;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
	if (tag >= PM8001_RESERVE_SLOT) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	__set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);

	/* reserved tags are in the lower region of the tagset */
	*tag_out = tag;
	return 0;
}

/**
 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001
 * @pdev: pci device
 * @virt_addr: the allocated (aligned) virtual address
 * @pphys_addr: DMA address of the allocation
 * @pphys_addr_hi: upper 32 bits of the aligned DMA address
 * @pphys_addr_lo: lower 32 bits of the aligned DMA address
 * @mem_size: memory size
 * @align: requested byte alignment
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
					    &mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
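	/*
	 * The area was over-allocated by 'align' bytes above, so the DMA
	 * address can be rounded up to the requested alignment and the
	 * returned virtual address shifted by the same offset.
	 */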
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}

/**
 * pm8001_find_ha_by_dev - find our hba struct from a domain device
 * provided by the sas layer
 * @dev: the domain device from the sas layer
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - phy control handler registered in the
 * sas_domain_function_template for libsas. Note that this only controls
 * HBA phys, not expander phys; to control an expander phy, use an SMP
 * command instead.
 * @sas_phy: the HBA phy to operate on.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "Phy control failed due to fatal errors\n");
		return -EFAULT;
	}

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			pm8001_ha->phy[phy_id].enable_completion = &completion;
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
					+ 0x1034 + (0x4000 * (phy_id & 3));
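
			/*
			 * Read the phy error counters (invalid dword,
			 * running disparity error, loss of dword sync and
			 * phy reset problem) straight from the controller
			 * registers.
			 */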
			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - enable all HBA phys by sending phy_start commands
 * to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
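
	/*
	 * Phy startup is serialized: wait for each phy_start_req to
	 * complete before enabling the next phy.
	 */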
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
 * for an internal abort task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the internal abort task
 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}
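
/* Treat a NULL device, or one whose slot was released back to SAS_PHY_UNUSED, as gone. */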
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))

static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	enum sas_protocol task_proto = task->task_proto;
	struct sas_tmf_task *tmf = task->tmf;
	int is_tmf = !!tmf;
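
	/* Dispatch to the protocol-specific request builder */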
	switch (task_proto) {
	case SAS_PROTOCOL_SMP:
		return pm8001_task_prep_smp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SSP:
		if (is_tmf)
			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
		return pm8001_task_prep_ssp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		return pm8001_task_prep_ata(pm8001_ha, ccb);
	case SAS_PROTOCOL_INTERNAL_ABORT:
		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
	default:
		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
			task_proto);
	}

	return -EINVAL;
}

/**
 * pm8001_queue_command - queue command handler registered for the upper
 * layer; all I/O commands sent to the HBA come through this interface.
 * @task: the task to execute.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port = NULL;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_dev = dev->lldd_dev;
	port = &pm8001_ha->port[sas_find_local_port_id(dev)];

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
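		/*
		 * For SATA/STP tasks, call task_done() with the HBA lock
		 * dropped; the libata completion path is not safe to run
		 * with it held.
		 */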
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		rc = -ENODEV;
		goto err_out;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				     task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}

/**
 * pm8001_ccb_task_free - unmap the sg for an ssp or smp command and free
 * the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the task to free
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			     task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, /* ata opcode */
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}

static void pm8001_init_dev(struct pm8001_device *pm8001_dev, int id)
{
	pm8001_dev->id = id;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	atomic_set(&pm8001_dev->running_req, 0);
}

/**
 * pm8001_alloc_dev - find an empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		struct pm8001_device *pm8001_dev = &pm8001_ha->devices[dev];

		if (pm8001_dev->dev_type == SAS_PHY_UNUSED) {
			pm8001_init_dev(pm8001_dev, dev);
			return pm8001_dev;
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "max support %d devices, ignore ..\n",
			   PM8001_MAX_DEVICES);
	}
	return NULL;
}

/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: device ID to match against
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
	u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * has been found. The LLDD then registers the device with the HBA firmware
 * using the "OPC_INB_REG_DEV" command, after which the HBA assigns a
 * device ID (based on the device's sas address) and returns it to the LLDD.
 * From then on we communicate with the HBA FW using the assigned device ID
 * rather than the sas address. This step is required for our HBA but may be
 * optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;

		phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
		if (phy_id < 0) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = phy_id;
		} else {
			pm8001_device->attached_phy = phy_id;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly attached sata */
		}
	} /* register this device to the HBA */
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

#define PM8001_TASK_TIMEOUT 20

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
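
		/*
		 * With no specific device given, only retry CCBs whose
		 * device pointer lands on a valid entry of this HBA's
		 * devices[] array.
		 */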
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
				- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
					       flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * The standard mandates a link reset for ATA (type 0) and a hard reset for
 * SSP (type 1), for RECOVERY only.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset the target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset the target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}

/* mandatory SAM-3: this task management function resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is in fatal error state,
		 * we will not get a response from the controller
		 */
		pm8001_dbg(pm8001_ha, FAIL,
			   "LUN reset failed due to fatal errors\n");
		return rc;
	}

	if (dev_is_sata(dev)) {
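		/*
		 * SATA devices have no LU reset; abort outstanding I/O,
		 * reset the phy and move the device back to OPERATIONAL.
		 */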
		struct sas_phy *phy = sas_get_local_phy(dev);
		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}
	/* If this failed, fall through to I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3, still need to free the task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (!task->lldd_task || !task->dev)
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/*
		 * If the controller is seeing fatal errors, the abort task
		 * will not get a response from the controller.
		 */
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
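	/*
	 * If libsas did not attach a slow_task, borrow an on-stack one so
	 * there is a completion to wait on for the internal abort below.
	 */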
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /* HW_EVENT_PHY_DOWN ack */
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * We wait for the task to be aborted so that the task
			 * is removed from the ccb. On success the caller is
			 * going to free the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			/*
			 * Ensure that if we see a completion for the ccb
			 * associated with the task which we are trying to
			 * abort, then we should not touch the sas_task, as it
			 * may race with libsas freeing it when we return here.
			 */
			ccb->task = NULL;
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
	}

out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		   pm8001_dev->device_id);
	return sas_clear_task_set(dev, lun);
}

void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
	struct pm8001_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct pm8001_port *port = phy->port;

	if (!sas_port) {
		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
		return;
	}
	sas_port->lldd_port = port;
}

void pm8001_setds_completion(struct domain_device *dev)
{
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (pm8001_ha->chip_id != chip_8001) {
		pm8001_dev->setds_completion = &completion_setstate;
		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	}
}

void pm8001_tmf_aborted(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;

	if (ccb)
		ccb->task = NULL;
}