1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libahci.c - Common AHCI SATA low-level routines
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
9 * Copyright 2004-2005 Red Hat, Inc.
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
14 * AHCI hardware documentation:
15 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
16 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
19 #include <linux/kernel.h>
20 #include <linux/gfp.h>
21 #include <linux/module.h>
22 #include <linux/nospec.h>
23 #include <linux/blkdev.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <linux/libata.h>
31 #include <linux/pci.h>
35 static int ahci_skip_host_reset
;
37 EXPORT_SYMBOL_GPL(ahci_ignore_sss
);
39 module_param_named(skip_host_reset
, ahci_skip_host_reset
, int, 0444);
40 MODULE_PARM_DESC(skip_host_reset
, "skip global host reset (0=don't skip, 1=skip)");
42 module_param_named(ignore_sss
, ahci_ignore_sss
, int, 0444);
43 MODULE_PARM_DESC(ignore_sss
, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
45 static int ahci_set_lpm(struct ata_link
*link
, enum ata_lpm_policy policy
,
47 static ssize_t
ahci_led_show(struct ata_port
*ap
, char *buf
);
48 static ssize_t
ahci_led_store(struct ata_port
*ap
, const char *buf
,
50 static ssize_t
ahci_transmit_led_message(struct ata_port
*ap
, u32 state
,
55 static int ahci_scr_read(struct ata_link
*link
, unsigned int sc_reg
, u32
*val
);
56 static int ahci_scr_write(struct ata_link
*link
, unsigned int sc_reg
, u32 val
);
57 static bool ahci_qc_fill_rtf(struct ata_queued_cmd
*qc
);
58 static int ahci_port_start(struct ata_port
*ap
);
59 static void ahci_port_stop(struct ata_port
*ap
);
60 static void ahci_qc_prep(struct ata_queued_cmd
*qc
);
61 static int ahci_pmp_qc_defer(struct ata_queued_cmd
*qc
);
62 static void ahci_freeze(struct ata_port
*ap
);
63 static void ahci_thaw(struct ata_port
*ap
);
64 static void ahci_set_aggressive_devslp(struct ata_port
*ap
, bool sleep
);
65 static void ahci_enable_fbs(struct ata_port
*ap
);
66 static void ahci_disable_fbs(struct ata_port
*ap
);
67 static void ahci_pmp_attach(struct ata_port
*ap
);
68 static void ahci_pmp_detach(struct ata_port
*ap
);
69 static int ahci_softreset(struct ata_link
*link
, unsigned int *class,
70 unsigned long deadline
);
71 static int ahci_pmp_retry_softreset(struct ata_link
*link
, unsigned int *class,
72 unsigned long deadline
);
73 static int ahci_hardreset(struct ata_link
*link
, unsigned int *class,
74 unsigned long deadline
);
75 static void ahci_postreset(struct ata_link
*link
, unsigned int *class);
76 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
);
77 static void ahci_dev_config(struct ata_device
*dev
);
79 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
);
81 static ssize_t
ahci_activity_show(struct ata_device
*dev
, char *buf
);
82 static ssize_t
ahci_activity_store(struct ata_device
*dev
,
83 enum sw_activity val
);
84 static void ahci_init_sw_activity(struct ata_link
*link
);
86 static ssize_t
ahci_show_host_caps(struct device
*dev
,
87 struct device_attribute
*attr
, char *buf
);
88 static ssize_t
ahci_show_host_cap2(struct device
*dev
,
89 struct device_attribute
*attr
, char *buf
);
90 static ssize_t
ahci_show_host_version(struct device
*dev
,
91 struct device_attribute
*attr
, char *buf
);
92 static ssize_t
ahci_show_port_cmd(struct device
*dev
,
93 struct device_attribute
*attr
, char *buf
);
94 static ssize_t
ahci_read_em_buffer(struct device
*dev
,
95 struct device_attribute
*attr
, char *buf
);
96 static ssize_t
ahci_store_em_buffer(struct device
*dev
,
97 struct device_attribute
*attr
,
98 const char *buf
, size_t size
);
99 static ssize_t
ahci_show_em_supported(struct device
*dev
,
100 struct device_attribute
*attr
, char *buf
);
101 static irqreturn_t
ahci_single_level_irq_intr(int irq
, void *dev_instance
);
103 static DEVICE_ATTR(ahci_host_caps
, S_IRUGO
, ahci_show_host_caps
, NULL
);
104 static DEVICE_ATTR(ahci_host_cap2
, S_IRUGO
, ahci_show_host_cap2
, NULL
);
105 static DEVICE_ATTR(ahci_host_version
, S_IRUGO
, ahci_show_host_version
, NULL
);
106 static DEVICE_ATTR(ahci_port_cmd
, S_IRUGO
, ahci_show_port_cmd
, NULL
);
107 static DEVICE_ATTR(em_buffer
, S_IWUSR
| S_IRUGO
,
108 ahci_read_em_buffer
, ahci_store_em_buffer
);
109 static DEVICE_ATTR(em_message_supported
, S_IRUGO
, ahci_show_em_supported
, NULL
);
111 struct device_attribute
*ahci_shost_attrs
[] = {
112 &dev_attr_link_power_management_policy
,
113 &dev_attr_em_message_type
,
114 &dev_attr_em_message
,
115 &dev_attr_ahci_host_caps
,
116 &dev_attr_ahci_host_cap2
,
117 &dev_attr_ahci_host_version
,
118 &dev_attr_ahci_port_cmd
,
120 &dev_attr_em_message_supported
,
123 EXPORT_SYMBOL_GPL(ahci_shost_attrs
);
125 struct device_attribute
*ahci_sdev_attrs
[] = {
126 &dev_attr_sw_activity
,
127 &dev_attr_unload_heads
,
128 &dev_attr_ncq_prio_enable
,
131 EXPORT_SYMBOL_GPL(ahci_sdev_attrs
);
133 struct ata_port_operations ahci_ops
= {
134 .inherits
= &sata_pmp_port_ops
,
136 .qc_defer
= ahci_pmp_qc_defer
,
137 .qc_prep
= ahci_qc_prep
,
138 .qc_issue
= ahci_qc_issue
,
139 .qc_fill_rtf
= ahci_qc_fill_rtf
,
141 .freeze
= ahci_freeze
,
143 .softreset
= ahci_softreset
,
144 .hardreset
= ahci_hardreset
,
145 .postreset
= ahci_postreset
,
146 .pmp_softreset
= ahci_softreset
,
147 .error_handler
= ahci_error_handler
,
148 .post_internal_cmd
= ahci_post_internal_cmd
,
149 .dev_config
= ahci_dev_config
,
151 .scr_read
= ahci_scr_read
,
152 .scr_write
= ahci_scr_write
,
153 .pmp_attach
= ahci_pmp_attach
,
154 .pmp_detach
= ahci_pmp_detach
,
156 .set_lpm
= ahci_set_lpm
,
157 .em_show
= ahci_led_show
,
158 .em_store
= ahci_led_store
,
159 .sw_activity_show
= ahci_activity_show
,
160 .sw_activity_store
= ahci_activity_store
,
161 .transmit_led_message
= ahci_transmit_led_message
,
163 .port_suspend
= ahci_port_suspend
,
164 .port_resume
= ahci_port_resume
,
166 .port_start
= ahci_port_start
,
167 .port_stop
= ahci_port_stop
,
169 EXPORT_SYMBOL_GPL(ahci_ops
);
171 struct ata_port_operations ahci_pmp_retry_srst_ops
= {
172 .inherits
= &ahci_ops
,
173 .softreset
= ahci_pmp_retry_softreset
,
175 EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops
);
177 static bool ahci_em_messages __read_mostly
= true;
178 module_param(ahci_em_messages
, bool, 0444);
179 /* add other LED protocol types when they become supported */
180 MODULE_PARM_DESC(ahci_em_messages
,
181 "AHCI Enclosure Management Message control (0 = off, 1 = on)");
183 /* device sleep idle timeout in ms */
184 static int devslp_idle_timeout __read_mostly
= 1000;
185 module_param(devslp_idle_timeout
, int, 0644);
186 MODULE_PARM_DESC(devslp_idle_timeout
, "device sleep idle timeout");
188 static void ahci_enable_ahci(void __iomem
*mmio
)
193 /* turn on AHCI_EN */
194 tmp
= readl(mmio
+ HOST_CTL
);
195 if (tmp
& HOST_AHCI_EN
)
198 /* Some controllers need AHCI_EN to be written multiple times.
199 * Try a few times before giving up.
201 for (i
= 0; i
< 5; i
++) {
203 writel(tmp
, mmio
+ HOST_CTL
);
204 tmp
= readl(mmio
+ HOST_CTL
); /* flush && sanity check */
205 if (tmp
& HOST_AHCI_EN
)
214 * ahci_rpm_get_port - Make sure the port is powered on
215 * @ap: Port to power on
217 * Whenever there is need to access the AHCI host registers outside of
218 * normal execution paths, call this function to make sure the host is
219 * actually powered on.
221 static int ahci_rpm_get_port(struct ata_port
*ap
)
223 return pm_runtime_get_sync(ap
->dev
);
227 * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
228 * @ap: Port to power down
230 * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
231 * if it has no more active users.
233 static void ahci_rpm_put_port(struct ata_port
*ap
)
235 pm_runtime_put(ap
->dev
);
238 static ssize_t
ahci_show_host_caps(struct device
*dev
,
239 struct device_attribute
*attr
, char *buf
)
241 struct Scsi_Host
*shost
= class_to_shost(dev
);
242 struct ata_port
*ap
= ata_shost_to_port(shost
);
243 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
245 return sprintf(buf
, "%x\n", hpriv
->cap
);
248 static ssize_t
ahci_show_host_cap2(struct device
*dev
,
249 struct device_attribute
*attr
, char *buf
)
251 struct Scsi_Host
*shost
= class_to_shost(dev
);
252 struct ata_port
*ap
= ata_shost_to_port(shost
);
253 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
255 return sprintf(buf
, "%x\n", hpriv
->cap2
);
258 static ssize_t
ahci_show_host_version(struct device
*dev
,
259 struct device_attribute
*attr
, char *buf
)
261 struct Scsi_Host
*shost
= class_to_shost(dev
);
262 struct ata_port
*ap
= ata_shost_to_port(shost
);
263 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
265 return sprintf(buf
, "%x\n", hpriv
->version
);
268 static ssize_t
ahci_show_port_cmd(struct device
*dev
,
269 struct device_attribute
*attr
, char *buf
)
271 struct Scsi_Host
*shost
= class_to_shost(dev
);
272 struct ata_port
*ap
= ata_shost_to_port(shost
);
273 void __iomem
*port_mmio
= ahci_port_base(ap
);
276 ahci_rpm_get_port(ap
);
277 ret
= sprintf(buf
, "%x\n", readl(port_mmio
+ PORT_CMD
));
278 ahci_rpm_put_port(ap
);
283 static ssize_t
ahci_read_em_buffer(struct device
*dev
,
284 struct device_attribute
*attr
, char *buf
)
286 struct Scsi_Host
*shost
= class_to_shost(dev
);
287 struct ata_port
*ap
= ata_shost_to_port(shost
);
288 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
289 void __iomem
*mmio
= hpriv
->mmio
;
290 void __iomem
*em_mmio
= mmio
+ hpriv
->em_loc
;
296 ahci_rpm_get_port(ap
);
297 spin_lock_irqsave(ap
->lock
, flags
);
299 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
300 if (!(ap
->flags
& ATA_FLAG_EM
) || em_ctl
& EM_CTL_XMT
||
301 !(hpriv
->em_msg_type
& EM_MSG_TYPE_SGPIO
)) {
302 spin_unlock_irqrestore(ap
->lock
, flags
);
303 ahci_rpm_put_port(ap
);
307 if (!(em_ctl
& EM_CTL_MR
)) {
308 spin_unlock_irqrestore(ap
->lock
, flags
);
309 ahci_rpm_put_port(ap
);
313 if (!(em_ctl
& EM_CTL_SMB
))
314 em_mmio
+= hpriv
->em_buf_sz
;
316 count
= hpriv
->em_buf_sz
;
318 /* the count should not be larger than PAGE_SIZE */
319 if (count
> PAGE_SIZE
) {
320 if (printk_ratelimit())
322 "EM read buffer size too large: "
323 "buffer size %u, page size %lu\n",
324 hpriv
->em_buf_sz
, PAGE_SIZE
);
328 for (i
= 0; i
< count
; i
+= 4) {
329 msg
= readl(em_mmio
+ i
);
331 buf
[i
+ 1] = (msg
>> 8) & 0xff;
332 buf
[i
+ 2] = (msg
>> 16) & 0xff;
333 buf
[i
+ 3] = (msg
>> 24) & 0xff;
336 spin_unlock_irqrestore(ap
->lock
, flags
);
337 ahci_rpm_put_port(ap
);
342 static ssize_t
ahci_store_em_buffer(struct device
*dev
,
343 struct device_attribute
*attr
,
344 const char *buf
, size_t size
)
346 struct Scsi_Host
*shost
= class_to_shost(dev
);
347 struct ata_port
*ap
= ata_shost_to_port(shost
);
348 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
349 void __iomem
*mmio
= hpriv
->mmio
;
350 void __iomem
*em_mmio
= mmio
+ hpriv
->em_loc
;
351 const unsigned char *msg_buf
= buf
;
356 /* check size validity */
357 if (!(ap
->flags
& ATA_FLAG_EM
) ||
358 !(hpriv
->em_msg_type
& EM_MSG_TYPE_SGPIO
) ||
359 size
% 4 || size
> hpriv
->em_buf_sz
)
362 ahci_rpm_get_port(ap
);
363 spin_lock_irqsave(ap
->lock
, flags
);
365 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
366 if (em_ctl
& EM_CTL_TM
) {
367 spin_unlock_irqrestore(ap
->lock
, flags
);
368 ahci_rpm_put_port(ap
);
372 for (i
= 0; i
< size
; i
+= 4) {
373 msg
= msg_buf
[i
] | msg_buf
[i
+ 1] << 8 |
374 msg_buf
[i
+ 2] << 16 | msg_buf
[i
+ 3] << 24;
375 writel(msg
, em_mmio
+ i
);
378 writel(em_ctl
| EM_CTL_TM
, mmio
+ HOST_EM_CTL
);
380 spin_unlock_irqrestore(ap
->lock
, flags
);
381 ahci_rpm_put_port(ap
);
386 static ssize_t
ahci_show_em_supported(struct device
*dev
,
387 struct device_attribute
*attr
, char *buf
)
389 struct Scsi_Host
*shost
= class_to_shost(dev
);
390 struct ata_port
*ap
= ata_shost_to_port(shost
);
391 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
392 void __iomem
*mmio
= hpriv
->mmio
;
395 ahci_rpm_get_port(ap
);
396 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
397 ahci_rpm_put_port(ap
);
399 return sprintf(buf
, "%s%s%s%s\n",
400 em_ctl
& EM_CTL_LED
? "led " : "",
401 em_ctl
& EM_CTL_SAFTE
? "saf-te " : "",
402 em_ctl
& EM_CTL_SES
? "ses-2 " : "",
403 em_ctl
& EM_CTL_SGPIO
? "sgpio " : "");
407 * ahci_save_initial_config - Save and fixup initial config values
408 * @dev: target AHCI device
409 * @hpriv: host private area to store config values
411 * Some registers containing configuration info might be setup by
412 * BIOS and might be cleared on reset. This function saves the
413 * initial values of those registers into @hpriv such that they
414 * can be restored after controller reset.
416 * If inconsistent, config values are fixed up by this function.
418 * If it is not set already this function sets hpriv->start_engine to
424 void ahci_save_initial_config(struct device
*dev
, struct ahci_host_priv
*hpriv
)
426 void __iomem
*mmio
= hpriv
->mmio
;
427 u32 cap
, cap2
, vers
, port_map
;
430 /* make sure AHCI mode is enabled before accessing CAP */
431 ahci_enable_ahci(mmio
);
433 /* Values prefixed with saved_ are written back to host after
434 * reset. Values without are used for driver operation.
436 hpriv
->saved_cap
= cap
= readl(mmio
+ HOST_CAP
);
437 hpriv
->saved_port_map
= port_map
= readl(mmio
+ HOST_PORTS_IMPL
);
439 /* CAP2 register is only defined for AHCI 1.2 and later */
440 vers
= readl(mmio
+ HOST_VERSION
);
441 if ((vers
>> 16) > 1 ||
442 ((vers
>> 16) == 1 && (vers
& 0xFFFF) >= 0x200))
443 hpriv
->saved_cap2
= cap2
= readl(mmio
+ HOST_CAP2
);
445 hpriv
->saved_cap2
= cap2
= 0;
447 /* some chips have errata preventing 64bit use */
448 if ((cap
& HOST_CAP_64
) && (hpriv
->flags
& AHCI_HFLAG_32BIT_ONLY
)) {
449 dev_info(dev
, "controller can't do 64bit DMA, forcing 32bit\n");
453 if ((cap
& HOST_CAP_NCQ
) && (hpriv
->flags
& AHCI_HFLAG_NO_NCQ
)) {
454 dev_info(dev
, "controller can't do NCQ, turning off CAP_NCQ\n");
455 cap
&= ~HOST_CAP_NCQ
;
458 if (!(cap
& HOST_CAP_NCQ
) && (hpriv
->flags
& AHCI_HFLAG_YES_NCQ
)) {
459 dev_info(dev
, "controller can do NCQ, turning on CAP_NCQ\n");
463 if ((cap
& HOST_CAP_PMP
) && (hpriv
->flags
& AHCI_HFLAG_NO_PMP
)) {
464 dev_info(dev
, "controller can't do PMP, turning off CAP_PMP\n");
465 cap
&= ~HOST_CAP_PMP
;
468 if ((cap
& HOST_CAP_SNTF
) && (hpriv
->flags
& AHCI_HFLAG_NO_SNTF
)) {
470 "controller can't do SNTF, turning off CAP_SNTF\n");
471 cap
&= ~HOST_CAP_SNTF
;
474 if ((cap2
& HOST_CAP2_SDS
) && (hpriv
->flags
& AHCI_HFLAG_NO_DEVSLP
)) {
476 "controller can't do DEVSLP, turning off\n");
477 cap2
&= ~HOST_CAP2_SDS
;
478 cap2
&= ~HOST_CAP2_SADM
;
481 if (!(cap
& HOST_CAP_FBS
) && (hpriv
->flags
& AHCI_HFLAG_YES_FBS
)) {
482 dev_info(dev
, "controller can do FBS, turning on CAP_FBS\n");
486 if ((cap
& HOST_CAP_FBS
) && (hpriv
->flags
& AHCI_HFLAG_NO_FBS
)) {
487 dev_info(dev
, "controller can't do FBS, turning off CAP_FBS\n");
488 cap
&= ~HOST_CAP_FBS
;
491 if (!(cap
& HOST_CAP_ALPM
) && (hpriv
->flags
& AHCI_HFLAG_YES_ALPM
)) {
492 dev_info(dev
, "controller can do ALPM, turning on CAP_ALPM\n");
493 cap
|= HOST_CAP_ALPM
;
496 if (hpriv
->force_port_map
&& port_map
!= hpriv
->force_port_map
) {
497 dev_info(dev
, "forcing port_map 0x%x -> 0x%x\n",
498 port_map
, hpriv
->force_port_map
);
499 port_map
= hpriv
->force_port_map
;
500 hpriv
->saved_port_map
= port_map
;
503 if (hpriv
->mask_port_map
) {
504 dev_warn(dev
, "masking port_map 0x%x -> 0x%x\n",
506 port_map
& hpriv
->mask_port_map
);
507 port_map
&= hpriv
->mask_port_map
;
510 /* cross check port_map and cap.n_ports */
514 for (i
= 0; i
< AHCI_MAX_PORTS
; i
++)
515 if (port_map
& (1 << i
))
518 /* If PI has more ports than n_ports, whine, clear
519 * port_map and let it be generated from n_ports.
521 if (map_ports
> ahci_nr_ports(cap
)) {
523 "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
524 port_map
, ahci_nr_ports(cap
));
529 /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
530 if (!port_map
&& vers
< 0x10300) {
531 port_map
= (1 << ahci_nr_ports(cap
)) - 1;
532 dev_warn(dev
, "forcing PORTS_IMPL to 0x%x\n", port_map
);
534 /* write the fixed up value to the PI register */
535 hpriv
->saved_port_map
= port_map
;
538 /* record values to use during operation */
541 hpriv
->version
= readl(mmio
+ HOST_VERSION
);
542 hpriv
->port_map
= port_map
;
544 if (!hpriv
->start_engine
)
545 hpriv
->start_engine
= ahci_start_engine
;
547 if (!hpriv
->stop_engine
)
548 hpriv
->stop_engine
= ahci_stop_engine
;
550 if (!hpriv
->irq_handler
)
551 hpriv
->irq_handler
= ahci_single_level_irq_intr
;
553 EXPORT_SYMBOL_GPL(ahci_save_initial_config
);
556 * ahci_restore_initial_config - Restore initial config
557 * @host: target ATA host
559 * Restore initial config stored by ahci_save_initial_config().
564 static void ahci_restore_initial_config(struct ata_host
*host
)
566 struct ahci_host_priv
*hpriv
= host
->private_data
;
567 void __iomem
*mmio
= hpriv
->mmio
;
569 writel(hpriv
->saved_cap
, mmio
+ HOST_CAP
);
570 if (hpriv
->saved_cap2
)
571 writel(hpriv
->saved_cap2
, mmio
+ HOST_CAP2
);
572 writel(hpriv
->saved_port_map
, mmio
+ HOST_PORTS_IMPL
);
573 (void) readl(mmio
+ HOST_PORTS_IMPL
); /* flush */
576 static unsigned ahci_scr_offset(struct ata_port
*ap
, unsigned int sc_reg
)
578 static const int offset
[] = {
579 [SCR_STATUS
] = PORT_SCR_STAT
,
580 [SCR_CONTROL
] = PORT_SCR_CTL
,
581 [SCR_ERROR
] = PORT_SCR_ERR
,
582 [SCR_ACTIVE
] = PORT_SCR_ACT
,
583 [SCR_NOTIFICATION
] = PORT_SCR_NTF
,
585 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
587 if (sc_reg
< ARRAY_SIZE(offset
) &&
588 (sc_reg
!= SCR_NOTIFICATION
|| (hpriv
->cap
& HOST_CAP_SNTF
)))
589 return offset
[sc_reg
];
593 static int ahci_scr_read(struct ata_link
*link
, unsigned int sc_reg
, u32
*val
)
595 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
596 int offset
= ahci_scr_offset(link
->ap
, sc_reg
);
599 *val
= readl(port_mmio
+ offset
);
605 static int ahci_scr_write(struct ata_link
*link
, unsigned int sc_reg
, u32 val
)
607 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
608 int offset
= ahci_scr_offset(link
->ap
, sc_reg
);
611 writel(val
, port_mmio
+ offset
);
617 void ahci_start_engine(struct ata_port
*ap
)
619 void __iomem
*port_mmio
= ahci_port_base(ap
);
623 tmp
= readl(port_mmio
+ PORT_CMD
);
624 tmp
|= PORT_CMD_START
;
625 writel(tmp
, port_mmio
+ PORT_CMD
);
626 readl(port_mmio
+ PORT_CMD
); /* flush */
628 EXPORT_SYMBOL_GPL(ahci_start_engine
);
630 int ahci_stop_engine(struct ata_port
*ap
)
632 void __iomem
*port_mmio
= ahci_port_base(ap
);
633 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
637 * On some controllers, stopping a port's DMA engine while the port
638 * is in ALPM state (partial or slumber) results in failures on
639 * subsequent DMA engine starts. For those controllers, put the
640 * port back in active state before stopping its DMA engine.
642 if ((hpriv
->flags
& AHCI_HFLAG_WAKE_BEFORE_STOP
) &&
643 (ap
->link
.lpm_policy
> ATA_LPM_MAX_POWER
) &&
644 ahci_set_lpm(&ap
->link
, ATA_LPM_MAX_POWER
, ATA_LPM_WAKE_ONLY
)) {
645 dev_err(ap
->host
->dev
, "Failed to wake up port before engine stop\n");
649 tmp
= readl(port_mmio
+ PORT_CMD
);
651 /* check if the HBA is idle */
652 if ((tmp
& (PORT_CMD_START
| PORT_CMD_LIST_ON
)) == 0)
656 * Don't try to issue commands but return with ENODEV if the
657 * AHCI controller not available anymore (e.g. due to PCIe hot
658 * unplugging). Otherwise a 500ms delay for each port is added.
660 if (tmp
== 0xffffffff) {
661 dev_err(ap
->host
->dev
, "AHCI controller unavailable!\n");
665 /* setting HBA to idle */
666 tmp
&= ~PORT_CMD_START
;
667 writel(tmp
, port_mmio
+ PORT_CMD
);
669 /* wait for engine to stop. This could be as long as 500 msec */
670 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
,
671 PORT_CMD_LIST_ON
, PORT_CMD_LIST_ON
, 1, 500);
672 if (tmp
& PORT_CMD_LIST_ON
)
677 EXPORT_SYMBOL_GPL(ahci_stop_engine
);
679 void ahci_start_fis_rx(struct ata_port
*ap
)
681 void __iomem
*port_mmio
= ahci_port_base(ap
);
682 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
683 struct ahci_port_priv
*pp
= ap
->private_data
;
686 /* set FIS registers */
687 if (hpriv
->cap
& HOST_CAP_64
)
688 writel((pp
->cmd_slot_dma
>> 16) >> 16,
689 port_mmio
+ PORT_LST_ADDR_HI
);
690 writel(pp
->cmd_slot_dma
& 0xffffffff, port_mmio
+ PORT_LST_ADDR
);
692 if (hpriv
->cap
& HOST_CAP_64
)
693 writel((pp
->rx_fis_dma
>> 16) >> 16,
694 port_mmio
+ PORT_FIS_ADDR_HI
);
695 writel(pp
->rx_fis_dma
& 0xffffffff, port_mmio
+ PORT_FIS_ADDR
);
697 /* enable FIS reception */
698 tmp
= readl(port_mmio
+ PORT_CMD
);
699 tmp
|= PORT_CMD_FIS_RX
;
700 writel(tmp
, port_mmio
+ PORT_CMD
);
703 readl(port_mmio
+ PORT_CMD
);
705 EXPORT_SYMBOL_GPL(ahci_start_fis_rx
);
707 static int ahci_stop_fis_rx(struct ata_port
*ap
)
709 void __iomem
*port_mmio
= ahci_port_base(ap
);
712 /* disable FIS reception */
713 tmp
= readl(port_mmio
+ PORT_CMD
);
714 tmp
&= ~PORT_CMD_FIS_RX
;
715 writel(tmp
, port_mmio
+ PORT_CMD
);
717 /* wait for completion, spec says 500ms, give it 1000 */
718 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
, PORT_CMD_FIS_ON
,
719 PORT_CMD_FIS_ON
, 10, 1000);
720 if (tmp
& PORT_CMD_FIS_ON
)
726 static void ahci_power_up(struct ata_port
*ap
)
728 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
729 void __iomem
*port_mmio
= ahci_port_base(ap
);
732 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
735 if (hpriv
->cap
& HOST_CAP_SSS
) {
736 cmd
|= PORT_CMD_SPIN_UP
;
737 writel(cmd
, port_mmio
+ PORT_CMD
);
741 writel(cmd
| PORT_CMD_ICC_ACTIVE
, port_mmio
+ PORT_CMD
);
744 static int ahci_set_lpm(struct ata_link
*link
, enum ata_lpm_policy policy
,
747 struct ata_port
*ap
= link
->ap
;
748 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
749 struct ahci_port_priv
*pp
= ap
->private_data
;
750 void __iomem
*port_mmio
= ahci_port_base(ap
);
752 if (policy
!= ATA_LPM_MAX_POWER
) {
753 /* wakeup flag only applies to the max power policy */
754 hints
&= ~ATA_LPM_WAKE_ONLY
;
757 * Disable interrupts on Phy Ready. This keeps us from
758 * getting woken up due to spurious phy ready
761 pp
->intr_mask
&= ~PORT_IRQ_PHYRDY
;
762 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
764 sata_link_scr_lpm(link
, policy
, false);
767 if (hpriv
->cap
& HOST_CAP_ALPM
) {
768 u32 cmd
= readl(port_mmio
+ PORT_CMD
);
770 if (policy
== ATA_LPM_MAX_POWER
|| !(hints
& ATA_LPM_HIPM
)) {
771 if (!(hints
& ATA_LPM_WAKE_ONLY
))
772 cmd
&= ~(PORT_CMD_ASP
| PORT_CMD_ALPE
);
773 cmd
|= PORT_CMD_ICC_ACTIVE
;
775 writel(cmd
, port_mmio
+ PORT_CMD
);
776 readl(port_mmio
+ PORT_CMD
);
778 /* wait 10ms to be sure we've come out of LPM state */
781 if (hints
& ATA_LPM_WAKE_ONLY
)
784 cmd
|= PORT_CMD_ALPE
;
785 if (policy
== ATA_LPM_MIN_POWER
)
787 else if (policy
== ATA_LPM_MIN_POWER_WITH_PARTIAL
)
788 cmd
&= ~PORT_CMD_ASP
;
790 /* write out new cmd value */
791 writel(cmd
, port_mmio
+ PORT_CMD
);
795 /* set aggressive device sleep */
796 if ((hpriv
->cap2
& HOST_CAP2_SDS
) &&
797 (hpriv
->cap2
& HOST_CAP2_SADM
) &&
798 (link
->device
->flags
& ATA_DFLAG_DEVSLP
)) {
799 if (policy
== ATA_LPM_MIN_POWER
||
800 policy
== ATA_LPM_MIN_POWER_WITH_PARTIAL
)
801 ahci_set_aggressive_devslp(ap
, true);
803 ahci_set_aggressive_devslp(ap
, false);
806 if (policy
== ATA_LPM_MAX_POWER
) {
807 sata_link_scr_lpm(link
, policy
, false);
809 /* turn PHYRDY IRQ back on */
810 pp
->intr_mask
|= PORT_IRQ_PHYRDY
;
811 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
818 static void ahci_power_down(struct ata_port
*ap
)
820 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
821 void __iomem
*port_mmio
= ahci_port_base(ap
);
824 if (!(hpriv
->cap
& HOST_CAP_SSS
))
827 /* put device into listen mode, first set PxSCTL.DET to 0 */
828 scontrol
= readl(port_mmio
+ PORT_SCR_CTL
);
830 writel(scontrol
, port_mmio
+ PORT_SCR_CTL
);
832 /* then set PxCMD.SUD to 0 */
833 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
834 cmd
&= ~PORT_CMD_SPIN_UP
;
835 writel(cmd
, port_mmio
+ PORT_CMD
);
839 static void ahci_start_port(struct ata_port
*ap
)
841 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
842 struct ahci_port_priv
*pp
= ap
->private_data
;
843 struct ata_link
*link
;
844 struct ahci_em_priv
*emp
;
848 /* enable FIS reception */
849 ahci_start_fis_rx(ap
);
852 if (!(hpriv
->flags
& AHCI_HFLAG_DELAY_ENGINE
))
853 hpriv
->start_engine(ap
);
856 if (ap
->flags
& ATA_FLAG_EM
) {
857 ata_for_each_link(link
, ap
, EDGE
) {
858 emp
= &pp
->em_priv
[link
->pmp
];
860 /* EM Transmit bit maybe busy during init */
861 for (i
= 0; i
< EM_MAX_RETRY
; i
++) {
862 rc
= ap
->ops
->transmit_led_message(ap
,
866 * If busy, give a breather but do not
867 * release EH ownership by using msleep()
868 * instead of ata_msleep(). EM Transmit
869 * bit is busy for the whole host and
870 * releasing ownership will cause other
871 * ports to fail the same way.
881 if (ap
->flags
& ATA_FLAG_SW_ACTIVITY
)
882 ata_for_each_link(link
, ap
, EDGE
)
883 ahci_init_sw_activity(link
);
887 static int ahci_deinit_port(struct ata_port
*ap
, const char **emsg
)
890 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
893 rc
= hpriv
->stop_engine(ap
);
895 *emsg
= "failed to stop engine";
899 /* disable FIS reception */
900 rc
= ahci_stop_fis_rx(ap
);
902 *emsg
= "failed stop FIS RX";
909 int ahci_reset_controller(struct ata_host
*host
)
911 struct ahci_host_priv
*hpriv
= host
->private_data
;
912 void __iomem
*mmio
= hpriv
->mmio
;
915 /* we must be in AHCI mode, before using anything
916 * AHCI-specific, such as HOST_RESET.
918 ahci_enable_ahci(mmio
);
920 /* global controller reset */
921 if (!ahci_skip_host_reset
) {
922 tmp
= readl(mmio
+ HOST_CTL
);
923 if ((tmp
& HOST_RESET
) == 0) {
924 writel(tmp
| HOST_RESET
, mmio
+ HOST_CTL
);
925 readl(mmio
+ HOST_CTL
); /* flush */
929 * to perform host reset, OS should set HOST_RESET
930 * and poll until this bit is read to be "0".
931 * reset must complete within 1 second, or
932 * the hardware should be considered fried.
934 tmp
= ata_wait_register(NULL
, mmio
+ HOST_CTL
, HOST_RESET
,
935 HOST_RESET
, 10, 1000);
937 if (tmp
& HOST_RESET
) {
938 dev_err(host
->dev
, "controller reset failed (0x%x)\n",
943 /* turn on AHCI mode */
944 ahci_enable_ahci(mmio
);
946 /* Some registers might be cleared on reset. Restore
949 if (!(hpriv
->flags
& AHCI_HFLAG_NO_WRITE_TO_RO
))
950 ahci_restore_initial_config(host
);
952 dev_info(host
->dev
, "skipping global host reset\n");
956 EXPORT_SYMBOL_GPL(ahci_reset_controller
);
958 static void ahci_sw_activity(struct ata_link
*link
)
960 struct ata_port
*ap
= link
->ap
;
961 struct ahci_port_priv
*pp
= ap
->private_data
;
962 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
964 if (!(link
->flags
& ATA_LFLAG_SW_ACTIVITY
))
968 if (!timer_pending(&emp
->timer
))
969 mod_timer(&emp
->timer
, jiffies
+ msecs_to_jiffies(10));
972 static void ahci_sw_activity_blink(struct timer_list
*t
)
974 struct ahci_em_priv
*emp
= from_timer(emp
, t
, timer
);
975 struct ata_link
*link
= emp
->link
;
976 struct ata_port
*ap
= link
->ap
;
978 unsigned long led_message
= emp
->led_state
;
979 u32 activity_led_state
;
982 led_message
&= EM_MSG_LED_VALUE
;
983 led_message
|= ap
->port_no
| (link
->pmp
<< 8);
985 /* check to see if we've had activity. If so,
986 * toggle state of LED and reset timer. If not,
987 * turn LED to desired idle state.
989 spin_lock_irqsave(ap
->lock
, flags
);
990 if (emp
->saved_activity
!= emp
->activity
) {
991 emp
->saved_activity
= emp
->activity
;
992 /* get the current LED state */
993 activity_led_state
= led_message
& EM_MSG_LED_VALUE_ON
;
995 if (activity_led_state
)
996 activity_led_state
= 0;
998 activity_led_state
= 1;
1000 /* clear old state */
1001 led_message
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1004 led_message
|= (activity_led_state
<< 16);
1005 mod_timer(&emp
->timer
, jiffies
+ msecs_to_jiffies(100));
1007 /* switch to idle */
1008 led_message
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1009 if (emp
->blink_policy
== BLINK_OFF
)
1010 led_message
|= (1 << 16);
1012 spin_unlock_irqrestore(ap
->lock
, flags
);
1013 ap
->ops
->transmit_led_message(ap
, led_message
, 4);
1016 static void ahci_init_sw_activity(struct ata_link
*link
)
1018 struct ata_port
*ap
= link
->ap
;
1019 struct ahci_port_priv
*pp
= ap
->private_data
;
1020 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1022 /* init activity stats, setup timer */
1023 emp
->saved_activity
= emp
->activity
= 0;
1025 timer_setup(&emp
->timer
, ahci_sw_activity_blink
, 0);
1027 /* check our blink policy and set flag for link if it's enabled */
1028 if (emp
->blink_policy
)
1029 link
->flags
|= ATA_LFLAG_SW_ACTIVITY
;
1032 int ahci_reset_em(struct ata_host
*host
)
1034 struct ahci_host_priv
*hpriv
= host
->private_data
;
1035 void __iomem
*mmio
= hpriv
->mmio
;
1038 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
1039 if ((em_ctl
& EM_CTL_TM
) || (em_ctl
& EM_CTL_RST
))
1042 writel(em_ctl
| EM_CTL_RST
, mmio
+ HOST_EM_CTL
);
1045 EXPORT_SYMBOL_GPL(ahci_reset_em
);
1047 static ssize_t
ahci_transmit_led_message(struct ata_port
*ap
, u32 state
,
1050 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1051 struct ahci_port_priv
*pp
= ap
->private_data
;
1052 void __iomem
*mmio
= hpriv
->mmio
;
1054 u32 message
[] = {0, 0};
1055 unsigned long flags
;
1057 struct ahci_em_priv
*emp
;
1059 /* get the slot number from the message */
1060 pmp
= (state
& EM_MSG_LED_PMP_SLOT
) >> 8;
1061 if (pmp
< EM_MAX_SLOTS
)
1062 emp
= &pp
->em_priv
[pmp
];
1066 ahci_rpm_get_port(ap
);
1067 spin_lock_irqsave(ap
->lock
, flags
);
1070 * if we are still busy transmitting a previous message,
1073 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
1074 if (em_ctl
& EM_CTL_TM
) {
1075 spin_unlock_irqrestore(ap
->lock
, flags
);
1076 ahci_rpm_put_port(ap
);
1080 if (hpriv
->em_msg_type
& EM_MSG_TYPE_LED
) {
1082 * create message header - this is all zero except for
1083 * the message size, which is 4 bytes.
1085 message
[0] |= (4 << 8);
1087 /* ignore 0:4 of byte zero, fill in port info yourself */
1088 message
[1] = ((state
& ~EM_MSG_LED_HBA_PORT
) | ap
->port_no
);
1090 /* write message to EM_LOC */
1091 writel(message
[0], mmio
+ hpriv
->em_loc
);
1092 writel(message
[1], mmio
+ hpriv
->em_loc
+4);
1095 * tell hardware to transmit the message
1097 writel(em_ctl
| EM_CTL_TM
, mmio
+ HOST_EM_CTL
);
1100 /* save off new led state for port/slot */
1101 emp
->led_state
= state
;
1103 spin_unlock_irqrestore(ap
->lock
, flags
);
1104 ahci_rpm_put_port(ap
);
1109 static ssize_t
ahci_led_show(struct ata_port
*ap
, char *buf
)
1111 struct ahci_port_priv
*pp
= ap
->private_data
;
1112 struct ata_link
*link
;
1113 struct ahci_em_priv
*emp
;
1116 ata_for_each_link(link
, ap
, EDGE
) {
1117 emp
= &pp
->em_priv
[link
->pmp
];
1118 rc
+= sprintf(buf
, "%lx\n", emp
->led_state
);
1123 static ssize_t
ahci_led_store(struct ata_port
*ap
, const char *buf
,
1128 struct ahci_port_priv
*pp
= ap
->private_data
;
1129 struct ahci_em_priv
*emp
;
1131 if (kstrtouint(buf
, 0, &state
) < 0)
1134 /* get the slot number from the message */
1135 pmp
= (state
& EM_MSG_LED_PMP_SLOT
) >> 8;
1136 if (pmp
< EM_MAX_SLOTS
) {
1137 pmp
= array_index_nospec(pmp
, EM_MAX_SLOTS
);
1138 emp
= &pp
->em_priv
[pmp
];
1143 /* mask off the activity bits if we are in sw_activity
1144 * mode, user should turn off sw_activity before setting
1145 * activity led through em_message
1147 if (emp
->blink_policy
)
1148 state
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1150 return ap
->ops
->transmit_led_message(ap
, state
, size
);
1153 static ssize_t
ahci_activity_store(struct ata_device
*dev
, enum sw_activity val
)
1155 struct ata_link
*link
= dev
->link
;
1156 struct ata_port
*ap
= link
->ap
;
1157 struct ahci_port_priv
*pp
= ap
->private_data
;
1158 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1159 u32 port_led_state
= emp
->led_state
;
1161 /* save the desired Activity LED behavior */
1164 link
->flags
&= ~(ATA_LFLAG_SW_ACTIVITY
);
1166 /* set the LED to OFF */
1167 port_led_state
&= EM_MSG_LED_VALUE_OFF
;
1168 port_led_state
|= (ap
->port_no
| (link
->pmp
<< 8));
1169 ap
->ops
->transmit_led_message(ap
, port_led_state
, 4);
1171 link
->flags
|= ATA_LFLAG_SW_ACTIVITY
;
1172 if (val
== BLINK_OFF
) {
1173 /* set LED to ON for idle */
1174 port_led_state
&= EM_MSG_LED_VALUE_OFF
;
1175 port_led_state
|= (ap
->port_no
| (link
->pmp
<< 8));
1176 port_led_state
|= EM_MSG_LED_VALUE_ON
; /* check this */
1177 ap
->ops
->transmit_led_message(ap
, port_led_state
, 4);
1180 emp
->blink_policy
= val
;
1184 static ssize_t
ahci_activity_show(struct ata_device
*dev
, char *buf
)
1186 struct ata_link
*link
= dev
->link
;
1187 struct ata_port
*ap
= link
->ap
;
1188 struct ahci_port_priv
*pp
= ap
->private_data
;
1189 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1191 /* display the saved value of activity behavior for this
1194 return sprintf(buf
, "%d\n", emp
->blink_policy
);
1197 static void ahci_port_init(struct device
*dev
, struct ata_port
*ap
,
1198 int port_no
, void __iomem
*mmio
,
1199 void __iomem
*port_mmio
)
1201 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1202 const char *emsg
= NULL
;
1206 /* make sure port is not active */
1207 rc
= ahci_deinit_port(ap
, &emsg
);
1209 dev_warn(dev
, "%s (%d)\n", emsg
, rc
);
1212 tmp
= readl(port_mmio
+ PORT_SCR_ERR
);
1213 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp
);
1214 writel(tmp
, port_mmio
+ PORT_SCR_ERR
);
1216 /* clear port IRQ */
1217 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
1218 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp
);
1220 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
1222 writel(1 << port_no
, mmio
+ HOST_IRQ_STAT
);
1224 /* mark esata ports */
1225 tmp
= readl(port_mmio
+ PORT_CMD
);
1226 if ((tmp
& PORT_CMD_ESP
) && (hpriv
->cap
& HOST_CAP_SXS
))
1227 ap
->pflags
|= ATA_PFLAG_EXTERNAL
;
1230 void ahci_init_controller(struct ata_host
*host
)
1232 struct ahci_host_priv
*hpriv
= host
->private_data
;
1233 void __iomem
*mmio
= hpriv
->mmio
;
1235 void __iomem
*port_mmio
;
1238 for (i
= 0; i
< host
->n_ports
; i
++) {
1239 struct ata_port
*ap
= host
->ports
[i
];
1241 port_mmio
= ahci_port_base(ap
);
1242 if (ata_port_is_dummy(ap
))
1245 ahci_port_init(host
->dev
, ap
, i
, mmio
, port_mmio
);
1248 tmp
= readl(mmio
+ HOST_CTL
);
1249 VPRINTK("HOST_CTL 0x%x\n", tmp
);
1250 writel(tmp
| HOST_IRQ_EN
, mmio
+ HOST_CTL
);
1251 tmp
= readl(mmio
+ HOST_CTL
);
1252 VPRINTK("HOST_CTL 0x%x\n", tmp
);
1254 EXPORT_SYMBOL_GPL(ahci_init_controller
);
1256 static void ahci_dev_config(struct ata_device
*dev
)
1258 struct ahci_host_priv
*hpriv
= dev
->link
->ap
->host
->private_data
;
1260 if (hpriv
->flags
& AHCI_HFLAG_SECT255
) {
1261 dev
->max_sectors
= 255;
1263 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1267 unsigned int ahci_dev_classify(struct ata_port
*ap
)
1269 void __iomem
*port_mmio
= ahci_port_base(ap
);
1270 struct ata_taskfile tf
;
1273 tmp
= readl(port_mmio
+ PORT_SIG
);
1274 tf
.lbah
= (tmp
>> 24) & 0xff;
1275 tf
.lbam
= (tmp
>> 16) & 0xff;
1276 tf
.lbal
= (tmp
>> 8) & 0xff;
1277 tf
.nsect
= (tmp
) & 0xff;
1279 return ata_dev_classify(&tf
);
1281 EXPORT_SYMBOL_GPL(ahci_dev_classify
);
1283 void ahci_fill_cmd_slot(struct ahci_port_priv
*pp
, unsigned int tag
,
1286 dma_addr_t cmd_tbl_dma
;
1288 cmd_tbl_dma
= pp
->cmd_tbl_dma
+ tag
* AHCI_CMD_TBL_SZ
;
1290 pp
->cmd_slot
[tag
].opts
= cpu_to_le32(opts
);
1291 pp
->cmd_slot
[tag
].status
= 0;
1292 pp
->cmd_slot
[tag
].tbl_addr
= cpu_to_le32(cmd_tbl_dma
& 0xffffffff);
1293 pp
->cmd_slot
[tag
].tbl_addr_hi
= cpu_to_le32((cmd_tbl_dma
>> 16) >> 16);
1295 EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot
);
1297 int ahci_kick_engine(struct ata_port
*ap
)
1299 void __iomem
*port_mmio
= ahci_port_base(ap
);
1300 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1301 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1306 rc
= hpriv
->stop_engine(ap
);
1311 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1313 busy
= status
& (ATA_BUSY
| ATA_DRQ
);
1314 if (!busy
&& !sata_pmp_attached(ap
)) {
1319 if (!(hpriv
->cap
& HOST_CAP_CLO
)) {
1325 tmp
= readl(port_mmio
+ PORT_CMD
);
1326 tmp
|= PORT_CMD_CLO
;
1327 writel(tmp
, port_mmio
+ PORT_CMD
);
1330 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
,
1331 PORT_CMD_CLO
, PORT_CMD_CLO
, 1, 500);
1332 if (tmp
& PORT_CMD_CLO
)
1335 /* restart engine */
1337 hpriv
->start_engine(ap
);
1340 EXPORT_SYMBOL_GPL(ahci_kick_engine
);
1342 static int ahci_exec_polled_cmd(struct ata_port
*ap
, int pmp
,
1343 struct ata_taskfile
*tf
, int is_cmd
, u16 flags
,
1344 unsigned long timeout_msec
)
1346 const u32 cmd_fis_len
= 5; /* five dwords */
1347 struct ahci_port_priv
*pp
= ap
->private_data
;
1348 void __iomem
*port_mmio
= ahci_port_base(ap
);
1349 u8
*fis
= pp
->cmd_tbl
;
1352 /* prep the command */
1353 ata_tf_to_fis(tf
, pmp
, is_cmd
, fis
);
1354 ahci_fill_cmd_slot(pp
, 0, cmd_fis_len
| flags
| (pmp
<< 12));
1356 /* set port value for softreset of Port Multiplier */
1357 if (pp
->fbs_enabled
&& pp
->fbs_last_dev
!= pmp
) {
1358 tmp
= readl(port_mmio
+ PORT_FBS
);
1359 tmp
&= ~(PORT_FBS_DEV_MASK
| PORT_FBS_DEC
);
1360 tmp
|= pmp
<< PORT_FBS_DEV_OFFSET
;
1361 writel(tmp
, port_mmio
+ PORT_FBS
);
1362 pp
->fbs_last_dev
= pmp
;
1366 writel(1, port_mmio
+ PORT_CMD_ISSUE
);
1369 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD_ISSUE
,
1370 0x1, 0x1, 1, timeout_msec
);
1372 ahci_kick_engine(ap
);
1376 readl(port_mmio
+ PORT_CMD_ISSUE
); /* flush */
1381 int ahci_do_softreset(struct ata_link
*link
, unsigned int *class,
1382 int pmp
, unsigned long deadline
,
1383 int (*check_ready
)(struct ata_link
*link
))
1385 struct ata_port
*ap
= link
->ap
;
1386 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1387 struct ahci_port_priv
*pp
= ap
->private_data
;
1388 const char *reason
= NULL
;
1389 unsigned long now
, msecs
;
1390 struct ata_taskfile tf
;
1391 bool fbs_disabled
= false;
1396 /* prepare for SRST (AHCI-1.1 10.4.1) */
1397 rc
= ahci_kick_engine(ap
);
1398 if (rc
&& rc
!= -EOPNOTSUPP
)
1399 ata_link_warn(link
, "failed to reset engine (errno=%d)\n", rc
);
1402 * According to AHCI-1.2 9.3.9: if FBS is enable, software shall
1403 * clear PxFBS.EN to '0' prior to issuing software reset to devices
1404 * that is attached to port multiplier.
1406 if (!ata_is_host_link(link
) && pp
->fbs_enabled
) {
1407 ahci_disable_fbs(ap
);
1408 fbs_disabled
= true;
1411 ata_tf_init(link
->device
, &tf
);
1413 /* issue the first H2D Register FIS */
1416 if (time_after(deadline
, now
))
1417 msecs
= jiffies_to_msecs(deadline
- now
);
1420 if (ahci_exec_polled_cmd(ap
, pmp
, &tf
, 0,
1421 AHCI_CMD_RESET
| AHCI_CMD_CLR_BUSY
, msecs
)) {
1423 reason
= "1st FIS failed";
1427 /* spec says at least 5us, but be generous and sleep for 1ms */
1430 /* issue the second H2D Register FIS */
1431 tf
.ctl
&= ~ATA_SRST
;
1432 ahci_exec_polled_cmd(ap
, pmp
, &tf
, 0, 0, 0);
1434 /* wait for link to become ready */
1435 rc
= ata_wait_after_reset(link
, deadline
, check_ready
);
1436 if (rc
== -EBUSY
&& hpriv
->flags
& AHCI_HFLAG_SRST_TOUT_IS_OFFLINE
) {
1438 * Workaround for cases where link online status can't
1439 * be trusted. Treat device readiness timeout as link
1442 ata_link_info(link
, "device not ready, treating as offline\n");
1443 *class = ATA_DEV_NONE
;
1445 /* link occupied, -ENODEV too is an error */
1446 reason
= "device not ready";
1449 *class = ahci_dev_classify(ap
);
1451 /* re-enable FBS if disabled before */
1453 ahci_enable_fbs(ap
);
1455 DPRINTK("EXIT, class=%u\n", *class);
1459 ata_link_err(link
, "softreset failed (%s)\n", reason
);
1463 int ahci_check_ready(struct ata_link
*link
)
1465 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
1466 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1468 return ata_check_ready(status
);
1470 EXPORT_SYMBOL_GPL(ahci_check_ready
);
1472 static int ahci_softreset(struct ata_link
*link
, unsigned int *class,
1473 unsigned long deadline
)
1475 int pmp
= sata_srst_pmp(link
);
1479 return ahci_do_softreset(link
, class, pmp
, deadline
, ahci_check_ready
);
1481 EXPORT_SYMBOL_GPL(ahci_do_softreset
);
1483 static int ahci_bad_pmp_check_ready(struct ata_link
*link
)
1485 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
1486 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1487 u32 irq_status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1490 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1491 * which can save timeout delay.
1493 if (irq_status
& PORT_IRQ_BAD_PMP
)
1496 return ata_check_ready(status
);
1499 static int ahci_pmp_retry_softreset(struct ata_link
*link
, unsigned int *class,
1500 unsigned long deadline
)
1502 struct ata_port
*ap
= link
->ap
;
1503 void __iomem
*port_mmio
= ahci_port_base(ap
);
1504 int pmp
= sata_srst_pmp(link
);
1510 rc
= ahci_do_softreset(link
, class, pmp
, deadline
,
1511 ahci_bad_pmp_check_ready
);
1514 * Soft reset fails with IPMS set when PMP is enabled but
1515 * SATA HDD/ODD is connected to SATA port, do soft reset
1519 irq_sts
= readl(port_mmio
+ PORT_IRQ_STAT
);
1520 if (irq_sts
& PORT_IRQ_BAD_PMP
) {
1522 "applying PMP SRST workaround "
1524 rc
= ahci_do_softreset(link
, class, 0, deadline
,
1532 int ahci_do_hardreset(struct ata_link
*link
, unsigned int *class,
1533 unsigned long deadline
, bool *online
)
1535 const unsigned long *timing
= sata_ehc_deb_timing(&link
->eh_context
);
1536 struct ata_port
*ap
= link
->ap
;
1537 struct ahci_port_priv
*pp
= ap
->private_data
;
1538 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1539 u8
*d2h_fis
= pp
->rx_fis
+ RX_FIS_D2H_REG
;
1540 struct ata_taskfile tf
;
1545 hpriv
->stop_engine(ap
);
1547 /* clear D2H reception area to properly wait for D2H FIS */
1548 ata_tf_init(link
->device
, &tf
);
1549 tf
.command
= ATA_BUSY
;
1550 ata_tf_to_fis(&tf
, 0, 0, d2h_fis
);
1552 rc
= sata_link_hardreset(link
, timing
, deadline
, online
,
1555 hpriv
->start_engine(ap
);
1558 *class = ahci_dev_classify(ap
);
1560 DPRINTK("EXIT, rc=%d, class=%u\n", rc
, *class);
1563 EXPORT_SYMBOL_GPL(ahci_do_hardreset
);
1565 static int ahci_hardreset(struct ata_link
*link
, unsigned int *class,
1566 unsigned long deadline
)
1570 return ahci_do_hardreset(link
, class, deadline
, &online
);
1573 static void ahci_postreset(struct ata_link
*link
, unsigned int *class)
1575 struct ata_port
*ap
= link
->ap
;
1576 void __iomem
*port_mmio
= ahci_port_base(ap
);
1579 ata_std_postreset(link
, class);
1581 /* Make sure port's ATAPI bit is set appropriately */
1582 new_tmp
= tmp
= readl(port_mmio
+ PORT_CMD
);
1583 if (*class == ATA_DEV_ATAPI
)
1584 new_tmp
|= PORT_CMD_ATAPI
;
1586 new_tmp
&= ~PORT_CMD_ATAPI
;
1587 if (new_tmp
!= tmp
) {
1588 writel(new_tmp
, port_mmio
+ PORT_CMD
);
1589 readl(port_mmio
+ PORT_CMD
); /* flush */
1593 static unsigned int ahci_fill_sg(struct ata_queued_cmd
*qc
, void *cmd_tbl
)
1595 struct scatterlist
*sg
;
1596 struct ahci_sg
*ahci_sg
= cmd_tbl
+ AHCI_CMD_TBL_HDR_SZ
;
1602 * Next, the S/G list.
1604 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1605 dma_addr_t addr
= sg_dma_address(sg
);
1606 u32 sg_len
= sg_dma_len(sg
);
1608 ahci_sg
[si
].addr
= cpu_to_le32(addr
& 0xffffffff);
1609 ahci_sg
[si
].addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
1610 ahci_sg
[si
].flags_size
= cpu_to_le32(sg_len
- 1);
1616 static int ahci_pmp_qc_defer(struct ata_queued_cmd
*qc
)
1618 struct ata_port
*ap
= qc
->ap
;
1619 struct ahci_port_priv
*pp
= ap
->private_data
;
1621 if (!sata_pmp_attached(ap
) || pp
->fbs_enabled
)
1622 return ata_std_qc_defer(qc
);
1624 return sata_pmp_qc_defer_cmd_switch(qc
);
1627 static void ahci_qc_prep(struct ata_queued_cmd
*qc
)
1629 struct ata_port
*ap
= qc
->ap
;
1630 struct ahci_port_priv
*pp
= ap
->private_data
;
1631 int is_atapi
= ata_is_atapi(qc
->tf
.protocol
);
1634 const u32 cmd_fis_len
= 5; /* five dwords */
1635 unsigned int n_elem
;
1638 * Fill in command table information. First, the header,
1639 * a SATA Register - Host to Device command FIS.
1641 cmd_tbl
= pp
->cmd_tbl
+ qc
->hw_tag
* AHCI_CMD_TBL_SZ
;
1643 ata_tf_to_fis(&qc
->tf
, qc
->dev
->link
->pmp
, 1, cmd_tbl
);
1645 memset(cmd_tbl
+ AHCI_CMD_TBL_CDB
, 0, 32);
1646 memcpy(cmd_tbl
+ AHCI_CMD_TBL_CDB
, qc
->cdb
, qc
->dev
->cdb_len
);
1650 if (qc
->flags
& ATA_QCFLAG_DMAMAP
)
1651 n_elem
= ahci_fill_sg(qc
, cmd_tbl
);
1654 * Fill in command slot information.
1656 opts
= cmd_fis_len
| n_elem
<< 16 | (qc
->dev
->link
->pmp
<< 12);
1657 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
1658 opts
|= AHCI_CMD_WRITE
;
1660 opts
|= AHCI_CMD_ATAPI
| AHCI_CMD_PREFETCH
;
1662 ahci_fill_cmd_slot(pp
, qc
->hw_tag
, opts
);
1665 static void ahci_fbs_dec_intr(struct ata_port
*ap
)
1667 struct ahci_port_priv
*pp
= ap
->private_data
;
1668 void __iomem
*port_mmio
= ahci_port_base(ap
);
1669 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
1673 BUG_ON(!pp
->fbs_enabled
);
1675 /* time to wait for DEC is not specified by AHCI spec,
1676 * add a retry loop for safety.
1678 writel(fbs
| PORT_FBS_DEC
, port_mmio
+ PORT_FBS
);
1679 fbs
= readl(port_mmio
+ PORT_FBS
);
1680 while ((fbs
& PORT_FBS_DEC
) && retries
--) {
1682 fbs
= readl(port_mmio
+ PORT_FBS
);
1685 if (fbs
& PORT_FBS_DEC
)
1686 dev_err(ap
->host
->dev
, "failed to clear device error\n");
1689 static void ahci_error_intr(struct ata_port
*ap
, u32 irq_stat
)
1691 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1692 struct ahci_port_priv
*pp
= ap
->private_data
;
1693 struct ata_eh_info
*host_ehi
= &ap
->link
.eh_info
;
1694 struct ata_link
*link
= NULL
;
1695 struct ata_queued_cmd
*active_qc
;
1696 struct ata_eh_info
*active_ehi
;
1697 bool fbs_need_dec
= false;
1700 /* determine active link with error */
1701 if (pp
->fbs_enabled
) {
1702 void __iomem
*port_mmio
= ahci_port_base(ap
);
1703 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
1704 int pmp
= fbs
>> PORT_FBS_DWE_OFFSET
;
1706 if ((fbs
& PORT_FBS_SDE
) && (pmp
< ap
->nr_pmp_links
)) {
1707 link
= &ap
->pmp_link
[pmp
];
1708 fbs_need_dec
= true;
1712 ata_for_each_link(link
, ap
, EDGE
)
1713 if (ata_link_active(link
))
1719 active_qc
= ata_qc_from_tag(ap
, link
->active_tag
);
1720 active_ehi
= &link
->eh_info
;
1722 /* record irq stat */
1723 ata_ehi_clear_desc(host_ehi
);
1724 ata_ehi_push_desc(host_ehi
, "irq_stat 0x%08x", irq_stat
);
1726 /* AHCI needs SError cleared; otherwise, it might lock up */
1727 ahci_scr_read(&ap
->link
, SCR_ERROR
, &serror
);
1728 ahci_scr_write(&ap
->link
, SCR_ERROR
, serror
);
1729 host_ehi
->serror
|= serror
;
1731 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1732 if (hpriv
->flags
& AHCI_HFLAG_IGN_IRQ_IF_ERR
)
1733 irq_stat
&= ~PORT_IRQ_IF_ERR
;
1735 if (irq_stat
& PORT_IRQ_TF_ERR
) {
1736 /* If qc is active, charge it; otherwise, the active
1737 * link. There's no active qc on NCQ errors. It will
1738 * be determined by EH by reading log page 10h.
1741 active_qc
->err_mask
|= AC_ERR_DEV
;
1743 active_ehi
->err_mask
|= AC_ERR_DEV
;
1745 if (hpriv
->flags
& AHCI_HFLAG_IGN_SERR_INTERNAL
)
1746 host_ehi
->serror
&= ~SERR_INTERNAL
;
1749 if (irq_stat
& PORT_IRQ_UNK_FIS
) {
1750 u32
*unk
= pp
->rx_fis
+ RX_FIS_UNK
;
1752 active_ehi
->err_mask
|= AC_ERR_HSM
;
1753 active_ehi
->action
|= ATA_EH_RESET
;
1754 ata_ehi_push_desc(active_ehi
,
1755 "unknown FIS %08x %08x %08x %08x" ,
1756 unk
[0], unk
[1], unk
[2], unk
[3]);
1759 if (sata_pmp_attached(ap
) && (irq_stat
& PORT_IRQ_BAD_PMP
)) {
1760 active_ehi
->err_mask
|= AC_ERR_HSM
;
1761 active_ehi
->action
|= ATA_EH_RESET
;
1762 ata_ehi_push_desc(active_ehi
, "incorrect PMP");
1765 if (irq_stat
& (PORT_IRQ_HBUS_ERR
| PORT_IRQ_HBUS_DATA_ERR
)) {
1766 host_ehi
->err_mask
|= AC_ERR_HOST_BUS
;
1767 host_ehi
->action
|= ATA_EH_RESET
;
1768 ata_ehi_push_desc(host_ehi
, "host bus error");
1771 if (irq_stat
& PORT_IRQ_IF_ERR
) {
1773 active_ehi
->err_mask
|= AC_ERR_DEV
;
1775 host_ehi
->err_mask
|= AC_ERR_ATA_BUS
;
1776 host_ehi
->action
|= ATA_EH_RESET
;
1779 ata_ehi_push_desc(host_ehi
, "interface fatal error");
1782 if (irq_stat
& (PORT_IRQ_CONNECT
| PORT_IRQ_PHYRDY
)) {
1783 ata_ehi_hotplugged(host_ehi
);
1784 ata_ehi_push_desc(host_ehi
, "%s",
1785 irq_stat
& PORT_IRQ_CONNECT
?
1786 "connection status changed" : "PHY RDY changed");
1789 /* okay, let's hand over to EH */
1791 if (irq_stat
& PORT_IRQ_FREEZE
)
1792 ata_port_freeze(ap
);
1793 else if (fbs_need_dec
) {
1794 ata_link_abort(link
);
1795 ahci_fbs_dec_intr(ap
);
1800 static void ahci_handle_port_interrupt(struct ata_port
*ap
,
1801 void __iomem
*port_mmio
, u32 status
)
1803 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1804 struct ahci_port_priv
*pp
= ap
->private_data
;
1805 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1806 int resetting
= !!(ap
->pflags
& ATA_PFLAG_RESETTING
);
1810 /* ignore BAD_PMP while resetting */
1811 if (unlikely(resetting
))
1812 status
&= ~PORT_IRQ_BAD_PMP
;
1814 if (sata_lpm_ignore_phy_events(&ap
->link
)) {
1815 status
&= ~PORT_IRQ_PHYRDY
;
1816 ahci_scr_write(&ap
->link
, SCR_ERROR
, SERR_PHYRDY_CHG
);
1819 if (unlikely(status
& PORT_IRQ_ERROR
)) {
1820 ahci_error_intr(ap
, status
);
1824 if (status
& PORT_IRQ_SDB_FIS
) {
1825 /* If SNotification is available, leave notification
1826 * handling to sata_async_notification(). If not,
1827 * emulate it by snooping SDB FIS RX area.
1829 * Snooping FIS RX area is probably cheaper than
1830 * poking SNotification but some constrollers which
1831 * implement SNotification, ICH9 for example, don't
1832 * store AN SDB FIS into receive area.
1834 if (hpriv
->cap
& HOST_CAP_SNTF
)
1835 sata_async_notification(ap
);
1837 /* If the 'N' bit in word 0 of the FIS is set,
1838 * we just received asynchronous notification.
1839 * Tell libata about it.
1841 * Lack of SNotification should not appear in
1842 * ahci 1.2, so the workaround is unnecessary
1843 * when FBS is enabled.
1845 if (pp
->fbs_enabled
)
1848 const __le32
*f
= pp
->rx_fis
+ RX_FIS_SDB
;
1849 u32 f0
= le32_to_cpu(f
[0]);
1851 sata_async_notification(ap
);
1856 /* pp->active_link is not reliable once FBS is enabled, both
1857 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
1858 * NCQ and non-NCQ commands may be in flight at the same time.
1860 if (pp
->fbs_enabled
) {
1861 if (ap
->qc_active
) {
1862 qc_active
= readl(port_mmio
+ PORT_SCR_ACT
);
1863 qc_active
|= readl(port_mmio
+ PORT_CMD_ISSUE
);
1866 /* pp->active_link is valid iff any command is in flight */
1867 if (ap
->qc_active
&& pp
->active_link
->sactive
)
1868 qc_active
= readl(port_mmio
+ PORT_SCR_ACT
);
1870 qc_active
= readl(port_mmio
+ PORT_CMD_ISSUE
);
1874 rc
= ata_qc_complete_multiple(ap
, qc_active
);
1876 /* while resetting, invalid completions are expected */
1877 if (unlikely(rc
< 0 && !resetting
)) {
1878 ehi
->err_mask
|= AC_ERR_HSM
;
1879 ehi
->action
|= ATA_EH_RESET
;
1880 ata_port_freeze(ap
);
1884 static void ahci_port_intr(struct ata_port
*ap
)
1886 void __iomem
*port_mmio
= ahci_port_base(ap
);
1889 status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1890 writel(status
, port_mmio
+ PORT_IRQ_STAT
);
1892 ahci_handle_port_interrupt(ap
, port_mmio
, status
);
1895 static irqreturn_t
ahci_multi_irqs_intr_hard(int irq
, void *dev_instance
)
1897 struct ata_port
*ap
= dev_instance
;
1898 void __iomem
*port_mmio
= ahci_port_base(ap
);
1903 status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1904 writel(status
, port_mmio
+ PORT_IRQ_STAT
);
1906 spin_lock(ap
->lock
);
1907 ahci_handle_port_interrupt(ap
, port_mmio
, status
);
1908 spin_unlock(ap
->lock
);
1915 u32
ahci_handle_port_intr(struct ata_host
*host
, u32 irq_masked
)
1917 unsigned int i
, handled
= 0;
1919 for (i
= 0; i
< host
->n_ports
; i
++) {
1920 struct ata_port
*ap
;
1922 if (!(irq_masked
& (1 << i
)))
1925 ap
= host
->ports
[i
];
1928 VPRINTK("port %u\n", i
);
1930 VPRINTK("port %u (no irq)\n", i
);
1931 if (ata_ratelimit())
1933 "interrupt on disabled port %u\n", i
);
1941 EXPORT_SYMBOL_GPL(ahci_handle_port_intr
);
1943 static irqreturn_t
ahci_single_level_irq_intr(int irq
, void *dev_instance
)
1945 struct ata_host
*host
= dev_instance
;
1946 struct ahci_host_priv
*hpriv
;
1947 unsigned int rc
= 0;
1949 u32 irq_stat
, irq_masked
;
1953 hpriv
= host
->private_data
;
1956 /* sigh. 0xffffffff is a valid return from h/w */
1957 irq_stat
= readl(mmio
+ HOST_IRQ_STAT
);
1961 irq_masked
= irq_stat
& hpriv
->port_map
;
1963 spin_lock(&host
->lock
);
1965 rc
= ahci_handle_port_intr(host
, irq_masked
);
1967 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
1968 * it should be cleared after all the port events are cleared;
1969 * otherwise, it will raise a spurious interrupt after each
1970 * valid one. Please read section 10.6.2 of ahci 1.1 for more
1973 * Also, use the unmasked value to clear interrupt as spurious
1974 * pending event on a dummy port might cause screaming IRQ.
1976 writel(irq_stat
, mmio
+ HOST_IRQ_STAT
);
1978 spin_unlock(&host
->lock
);
1982 return IRQ_RETVAL(rc
);
1985 unsigned int ahci_qc_issue(struct ata_queued_cmd
*qc
)
1987 struct ata_port
*ap
= qc
->ap
;
1988 void __iomem
*port_mmio
= ahci_port_base(ap
);
1989 struct ahci_port_priv
*pp
= ap
->private_data
;
1991 /* Keep track of the currently active link. It will be used
1992 * in completion path to determine whether NCQ phase is in
1995 pp
->active_link
= qc
->dev
->link
;
1997 if (ata_is_ncq(qc
->tf
.protocol
))
1998 writel(1 << qc
->hw_tag
, port_mmio
+ PORT_SCR_ACT
);
2000 if (pp
->fbs_enabled
&& pp
->fbs_last_dev
!= qc
->dev
->link
->pmp
) {
2001 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
2002 fbs
&= ~(PORT_FBS_DEV_MASK
| PORT_FBS_DEC
);
2003 fbs
|= qc
->dev
->link
->pmp
<< PORT_FBS_DEV_OFFSET
;
2004 writel(fbs
, port_mmio
+ PORT_FBS
);
2005 pp
->fbs_last_dev
= qc
->dev
->link
->pmp
;
2008 writel(1 << qc
->hw_tag
, port_mmio
+ PORT_CMD_ISSUE
);
2010 ahci_sw_activity(qc
->dev
->link
);
2014 EXPORT_SYMBOL_GPL(ahci_qc_issue
);
2016 static bool ahci_qc_fill_rtf(struct ata_queued_cmd
*qc
)
2018 struct ahci_port_priv
*pp
= qc
->ap
->private_data
;
2019 u8
*rx_fis
= pp
->rx_fis
;
2021 if (pp
->fbs_enabled
)
2022 rx_fis
+= qc
->dev
->link
->pmp
* AHCI_RX_FIS_SZ
;
2025 * After a successful execution of an ATA PIO data-in command,
2026 * the device doesn't send D2H Reg FIS to update the TF and
2027 * the host should take TF and E_Status from the preceding PIO
2030 if (qc
->tf
.protocol
== ATA_PROT_PIO
&& qc
->dma_dir
== DMA_FROM_DEVICE
&&
2031 !(qc
->flags
& ATA_QCFLAG_FAILED
)) {
2032 ata_tf_from_fis(rx_fis
+ RX_FIS_PIO_SETUP
, &qc
->result_tf
);
2033 qc
->result_tf
.command
= (rx_fis
+ RX_FIS_PIO_SETUP
)[15];
2035 ata_tf_from_fis(rx_fis
+ RX_FIS_D2H_REG
, &qc
->result_tf
);
2040 static void ahci_freeze(struct ata_port
*ap
)
2042 void __iomem
*port_mmio
= ahci_port_base(ap
);
2045 writel(0, port_mmio
+ PORT_IRQ_MASK
);
2048 static void ahci_thaw(struct ata_port
*ap
)
2050 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2051 void __iomem
*mmio
= hpriv
->mmio
;
2052 void __iomem
*port_mmio
= ahci_port_base(ap
);
2054 struct ahci_port_priv
*pp
= ap
->private_data
;
2057 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
2058 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
2059 writel(1 << ap
->port_no
, mmio
+ HOST_IRQ_STAT
);
2061 /* turn IRQ back on */
2062 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
2065 void ahci_error_handler(struct ata_port
*ap
)
2067 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2069 if (!(ap
->pflags
& ATA_PFLAG_FROZEN
)) {
2070 /* restart engine */
2071 hpriv
->stop_engine(ap
);
2072 hpriv
->start_engine(ap
);
2075 sata_pmp_error_handler(ap
);
2077 if (!ata_dev_enabled(ap
->link
.device
))
2078 hpriv
->stop_engine(ap
);
2080 EXPORT_SYMBOL_GPL(ahci_error_handler
);
2082 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
)
2084 struct ata_port
*ap
= qc
->ap
;
2086 /* make DMA engine forget about the failed command */
2087 if (qc
->flags
& ATA_QCFLAG_FAILED
)
2088 ahci_kick_engine(ap
);
2091 static void ahci_set_aggressive_devslp(struct ata_port
*ap
, bool sleep
)
2093 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2094 void __iomem
*port_mmio
= ahci_port_base(ap
);
2095 struct ata_device
*dev
= ap
->link
.device
;
2096 u32 devslp
, dm
, dito
, mdat
, deto
, dito_conf
;
2098 unsigned int err_mask
;
2100 devslp
= readl(port_mmio
+ PORT_DEVSLP
);
2101 if (!(devslp
& PORT_DEVSLP_DSP
)) {
2102 dev_info(ap
->host
->dev
, "port does not support device sleep\n");
2106 /* disable device sleep */
2108 if (devslp
& PORT_DEVSLP_ADSE
) {
2109 writel(devslp
& ~PORT_DEVSLP_ADSE
,
2110 port_mmio
+ PORT_DEVSLP
);
2111 err_mask
= ata_dev_set_feature(dev
,
2112 SETFEATURES_SATA_DISABLE
,
2114 if (err_mask
&& err_mask
!= AC_ERR_DEV
)
2115 ata_dev_warn(dev
, "failed to disable DEVSLP\n");
2120 dm
= (devslp
& PORT_DEVSLP_DM_MASK
) >> PORT_DEVSLP_DM_OFFSET
;
2121 dito
= devslp_idle_timeout
/ (dm
+ 1);
2125 dito_conf
= (devslp
>> PORT_DEVSLP_DITO_OFFSET
) & 0x3FF;
2127 /* device sleep was already enabled and same dito */
2128 if ((devslp
& PORT_DEVSLP_ADSE
) && (dito_conf
== dito
))
2131 /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
2132 rc
= hpriv
->stop_engine(ap
);
2136 /* Use the nominal value 10 ms if the read MDAT is zero,
2137 * the nominal value of DETO is 20 ms.
2139 if (dev
->devslp_timing
[ATA_LOG_DEVSLP_VALID
] &
2140 ATA_LOG_DEVSLP_VALID_MASK
) {
2141 mdat
= dev
->devslp_timing
[ATA_LOG_DEVSLP_MDAT
] &
2142 ATA_LOG_DEVSLP_MDAT_MASK
;
2145 deto
= dev
->devslp_timing
[ATA_LOG_DEVSLP_DETO
];
2153 /* Make dito, mdat, deto bits to 0s */
2154 devslp
&= ~GENMASK_ULL(24, 2);
2155 devslp
|= ((dito
<< PORT_DEVSLP_DITO_OFFSET
) |
2156 (mdat
<< PORT_DEVSLP_MDAT_OFFSET
) |
2157 (deto
<< PORT_DEVSLP_DETO_OFFSET
) |
2159 writel(devslp
, port_mmio
+ PORT_DEVSLP
);
2161 hpriv
->start_engine(ap
);
2163 /* enable device sleep feature for the drive */
2164 err_mask
= ata_dev_set_feature(dev
,
2165 SETFEATURES_SATA_ENABLE
,
2167 if (err_mask
&& err_mask
!= AC_ERR_DEV
)
2168 ata_dev_warn(dev
, "failed to enable DEVSLP\n");
2171 static void ahci_enable_fbs(struct ata_port
*ap
)
2173 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2174 struct ahci_port_priv
*pp
= ap
->private_data
;
2175 void __iomem
*port_mmio
= ahci_port_base(ap
);
2179 if (!pp
->fbs_supported
)
2182 fbs
= readl(port_mmio
+ PORT_FBS
);
2183 if (fbs
& PORT_FBS_EN
) {
2184 pp
->fbs_enabled
= true;
2185 pp
->fbs_last_dev
= -1; /* initialization */
2189 rc
= hpriv
->stop_engine(ap
);
2193 writel(fbs
| PORT_FBS_EN
, port_mmio
+ PORT_FBS
);
2194 fbs
= readl(port_mmio
+ PORT_FBS
);
2195 if (fbs
& PORT_FBS_EN
) {
2196 dev_info(ap
->host
->dev
, "FBS is enabled\n");
2197 pp
->fbs_enabled
= true;
2198 pp
->fbs_last_dev
= -1; /* initialization */
2200 dev_err(ap
->host
->dev
, "Failed to enable FBS\n");
2202 hpriv
->start_engine(ap
);
2205 static void ahci_disable_fbs(struct ata_port
*ap
)
2207 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2208 struct ahci_port_priv
*pp
= ap
->private_data
;
2209 void __iomem
*port_mmio
= ahci_port_base(ap
);
2213 if (!pp
->fbs_supported
)
2216 fbs
= readl(port_mmio
+ PORT_FBS
);
2217 if ((fbs
& PORT_FBS_EN
) == 0) {
2218 pp
->fbs_enabled
= false;
2222 rc
= hpriv
->stop_engine(ap
);
2226 writel(fbs
& ~PORT_FBS_EN
, port_mmio
+ PORT_FBS
);
2227 fbs
= readl(port_mmio
+ PORT_FBS
);
2228 if (fbs
& PORT_FBS_EN
)
2229 dev_err(ap
->host
->dev
, "Failed to disable FBS\n");
2231 dev_info(ap
->host
->dev
, "FBS is disabled\n");
2232 pp
->fbs_enabled
= false;
2235 hpriv
->start_engine(ap
);
2238 static void ahci_pmp_attach(struct ata_port
*ap
)
2240 void __iomem
*port_mmio
= ahci_port_base(ap
);
2241 struct ahci_port_priv
*pp
= ap
->private_data
;
2244 cmd
= readl(port_mmio
+ PORT_CMD
);
2245 cmd
|= PORT_CMD_PMP
;
2246 writel(cmd
, port_mmio
+ PORT_CMD
);
2248 ahci_enable_fbs(ap
);
2250 pp
->intr_mask
|= PORT_IRQ_BAD_PMP
;
2253 * We must not change the port interrupt mask register if the
2254 * port is marked frozen, the value in pp->intr_mask will be
2255 * restored later when the port is thawed.
2257 * Note that during initialization, the port is marked as
2258 * frozen since the irq handler is not yet registered.
2260 if (!(ap
->pflags
& ATA_PFLAG_FROZEN
))
2261 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
2264 static void ahci_pmp_detach(struct ata_port
*ap
)
2266 void __iomem
*port_mmio
= ahci_port_base(ap
);
2267 struct ahci_port_priv
*pp
= ap
->private_data
;
2270 ahci_disable_fbs(ap
);
2272 cmd
= readl(port_mmio
+ PORT_CMD
);
2273 cmd
&= ~PORT_CMD_PMP
;
2274 writel(cmd
, port_mmio
+ PORT_CMD
);
2276 pp
->intr_mask
&= ~PORT_IRQ_BAD_PMP
;
2278 /* see comment above in ahci_pmp_attach() */
2279 if (!(ap
->pflags
& ATA_PFLAG_FROZEN
))
2280 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
/* Resume a port: take a runtime-PM reference, power it up and start it,
 * then (re)apply PMP configuration to match the current topology.
 */
int ahci_port_resume(struct ata_port *ap)
{
	ahci_rpm_get_port(ap);

	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);
2300 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
)
2302 const char *emsg
= NULL
;
2305 rc
= ahci_deinit_port(ap
, &emsg
);
2307 ahci_power_down(ap
);
2309 ata_port_err(ap
, "%s (%d)\n", emsg
, rc
);
2310 ata_port_freeze(ap
);
2313 ahci_rpm_put_port(ap
);
2318 static int ahci_port_start(struct ata_port
*ap
)
2320 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2321 struct device
*dev
= ap
->host
->dev
;
2322 struct ahci_port_priv
*pp
;
2325 size_t dma_sz
, rx_fis_sz
;
2327 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
2331 if (ap
->host
->n_ports
> 1) {
2332 pp
->irq_desc
= devm_kzalloc(dev
, 8, GFP_KERNEL
);
2333 if (!pp
->irq_desc
) {
2334 devm_kfree(dev
, pp
);
2337 snprintf(pp
->irq_desc
, 8,
2338 "%s%d", dev_driver_string(dev
), ap
->port_no
);
2341 /* check FBS capability */
2342 if ((hpriv
->cap
& HOST_CAP_FBS
) && sata_pmp_supported(ap
)) {
2343 void __iomem
*port_mmio
= ahci_port_base(ap
);
2344 u32 cmd
= readl(port_mmio
+ PORT_CMD
);
2345 if (cmd
& PORT_CMD_FBSCP
)
2346 pp
->fbs_supported
= true;
2347 else if (hpriv
->flags
& AHCI_HFLAG_YES_FBS
) {
2348 dev_info(dev
, "port %d can do FBS, forcing FBSCP\n",
2350 pp
->fbs_supported
= true;
2352 dev_warn(dev
, "port %d is not capable of FBS\n",
2356 if (pp
->fbs_supported
) {
2357 dma_sz
= AHCI_PORT_PRIV_FBS_DMA_SZ
;
2358 rx_fis_sz
= AHCI_RX_FIS_SZ
* 16;
2360 dma_sz
= AHCI_PORT_PRIV_DMA_SZ
;
2361 rx_fis_sz
= AHCI_RX_FIS_SZ
;
2364 mem
= dmam_alloc_coherent(dev
, dma_sz
, &mem_dma
, GFP_KERNEL
);
2369 * First item in chunk of DMA memory: 32-slot command table,
2370 * 32 bytes each in size
2373 pp
->cmd_slot_dma
= mem_dma
;
2375 mem
+= AHCI_CMD_SLOT_SZ
;
2376 mem_dma
+= AHCI_CMD_SLOT_SZ
;
2379 * Second item: Received-FIS area
2382 pp
->rx_fis_dma
= mem_dma
;
2385 mem_dma
+= rx_fis_sz
;
2388 * Third item: data area for storing a single command
2389 * and its scatter-gather table
2392 pp
->cmd_tbl_dma
= mem_dma
;
2395 * Save off initial list of interrupts to be enabled.
2396 * This could be changed later
2398 pp
->intr_mask
= DEF_PORT_IRQ
;
2401 * Switch to per-port locking in case each port has its own MSI vector.
2403 if (hpriv
->flags
& AHCI_HFLAG_MULTI_MSI
) {
2404 spin_lock_init(&pp
->lock
);
2405 ap
->lock
= &pp
->lock
;
2408 ap
->private_data
= pp
;
2410 /* engage engines, captain */
2411 return ahci_port_resume(ap
);
2414 static void ahci_port_stop(struct ata_port
*ap
)
2416 const char *emsg
= NULL
;
2417 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2418 void __iomem
*host_mmio
= hpriv
->mmio
;
2421 /* de-initialize port */
2422 rc
= ahci_deinit_port(ap
, &emsg
);
2424 ata_port_warn(ap
, "%s (%d)\n", emsg
, rc
);
2427 * Clear GHC.IS to prevent stuck INTx after disabling MSI and
2430 writel(1 << ap
->port_no
, host_mmio
+ HOST_IRQ_STAT
);
2432 ahci_rpm_put_port(ap
);
2435 void ahci_print_info(struct ata_host
*host
, const char *scc_s
)
2437 struct ahci_host_priv
*hpriv
= host
->private_data
;
2438 u32 vers
, cap
, cap2
, impl
, speed
;
2439 const char *speed_s
;
2441 vers
= hpriv
->version
;
2444 impl
= hpriv
->port_map
;
2446 speed
= (cap
>> 20) & 0xf;
2449 else if (speed
== 2)
2451 else if (speed
== 3)
2457 "AHCI %02x%02x.%02x%02x "
2458 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2461 (vers
>> 24) & 0xff,
2462 (vers
>> 16) & 0xff,
2466 ((cap
>> 8) & 0x1f) + 1,
2480 cap
& HOST_CAP_64
? "64bit " : "",
2481 cap
& HOST_CAP_NCQ
? "ncq " : "",
2482 cap
& HOST_CAP_SNTF
? "sntf " : "",
2483 cap
& HOST_CAP_MPS
? "ilck " : "",
2484 cap
& HOST_CAP_SSS
? "stag " : "",
2485 cap
& HOST_CAP_ALPM
? "pm " : "",
2486 cap
& HOST_CAP_LED
? "led " : "",
2487 cap
& HOST_CAP_CLO
? "clo " : "",
2488 cap
& HOST_CAP_ONLY
? "only " : "",
2489 cap
& HOST_CAP_PMP
? "pmp " : "",
2490 cap
& HOST_CAP_FBS
? "fbs " : "",
2491 cap
& HOST_CAP_PIO_MULTI
? "pio " : "",
2492 cap
& HOST_CAP_SSC
? "slum " : "",
2493 cap
& HOST_CAP_PART
? "part " : "",
2494 cap
& HOST_CAP_CCC
? "ccc " : "",
2495 cap
& HOST_CAP_EMS
? "ems " : "",
2496 cap
& HOST_CAP_SXS
? "sxs " : "",
2497 cap2
& HOST_CAP2_DESO
? "deso " : "",
2498 cap2
& HOST_CAP2_SADM
? "sadm " : "",
2499 cap2
& HOST_CAP2_SDS
? "sds " : "",
2500 cap2
& HOST_CAP2_APST
? "apst " : "",
2501 cap2
& HOST_CAP2_NVMHCI
? "nvmp " : "",
2502 cap2
& HOST_CAP2_BOH
? "boh " : ""
2505 EXPORT_SYMBOL_GPL(ahci_print_info
);
2507 void ahci_set_em_messages(struct ahci_host_priv
*hpriv
,
2508 struct ata_port_info
*pi
)
2511 void __iomem
*mmio
= hpriv
->mmio
;
2512 u32 em_loc
= readl(mmio
+ HOST_EM_LOC
);
2513 u32 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
2515 if (!ahci_em_messages
|| !(hpriv
->cap
& HOST_CAP_EMS
))
2518 messages
= (em_ctl
& EM_CTRL_MSG_TYPE
) >> 16;
2522 hpriv
->em_loc
= ((em_loc
>> 16) * 4);
2523 hpriv
->em_buf_sz
= ((em_loc
& 0xff) * 4);
2524 hpriv
->em_msg_type
= messages
;
2525 pi
->flags
|= ATA_FLAG_EM
;
2526 if (!(em_ctl
& EM_CTL_ALHD
))
2527 pi
->flags
|= ATA_FLAG_SW_ACTIVITY
;
2530 EXPORT_SYMBOL_GPL(ahci_set_em_messages
);
2532 static int ahci_host_activate_multi_irqs(struct ata_host
*host
,
2533 struct scsi_host_template
*sht
)
2535 struct ahci_host_priv
*hpriv
= host
->private_data
;
2538 rc
= ata_host_start(host
);
2542 * Requests IRQs according to AHCI-1.1 when multiple MSIs were
2543 * allocated. That is one MSI per port, starting from @irq.
2545 for (i
= 0; i
< host
->n_ports
; i
++) {
2546 struct ahci_port_priv
*pp
= host
->ports
[i
]->private_data
;
2547 int irq
= hpriv
->get_irq_vector(host
, i
);
2549 /* Do not receive interrupts sent by dummy ports */
2555 rc
= devm_request_irq(host
->dev
, irq
, ahci_multi_irqs_intr_hard
,
2556 0, pp
->irq_desc
, host
->ports
[i
]);
2560 ata_port_desc(host
->ports
[i
], "irq %d", irq
);
2563 return ata_host_register(host
, sht
);
2567 * ahci_host_activate - start AHCI host, request IRQs and register it
2568 * @host: target ATA host
2569 * @sht: scsi_host_template to use when registering the host
2572 * Inherited from calling layer (may sleep).
2575 * 0 on success, -errno otherwise.
2577 int ahci_host_activate(struct ata_host
*host
, struct scsi_host_template
*sht
)
2579 struct ahci_host_priv
*hpriv
= host
->private_data
;
2580 int irq
= hpriv
->irq
;
2583 if (hpriv
->flags
& AHCI_HFLAG_MULTI_MSI
) {
2584 if (hpriv
->irq_handler
&&
2585 hpriv
->irq_handler
!= ahci_single_level_irq_intr
)
2587 "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
2588 if (!hpriv
->get_irq_vector
) {
2590 "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
2594 rc
= ahci_host_activate_multi_irqs(host
, sht
);
2596 rc
= ata_host_activate(host
, irq
, hpriv
->irq_handler
,
2603 EXPORT_SYMBOL_GPL(ahci_host_activate
);
2605 MODULE_AUTHOR("Jeff Garzik");
2606 MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2607 MODULE_LICENSE("GPL");