1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * libahci.c - Common AHCI SATA low-level routines
5 * Maintained by: Tejun Heo <tj@kernel.org>
6 * Please ALWAYS copy linux-ide@vger.kernel.org
9 * Copyright 2004-2005 Red Hat, Inc.
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
14 * AHCI hardware documentation:
15 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
16 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
19 #include <linux/bitops.h>
20 #include <linux/kernel.h>
21 #include <linux/gfp.h>
22 #include <linux/module.h>
23 #include <linux/nospec.h>
24 #include <linux/blkdev.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <linux/libata.h>
32 #include <linux/pci.h>
36 static int ahci_skip_host_reset
;
38 EXPORT_SYMBOL_GPL(ahci_ignore_sss
);
40 module_param_named(skip_host_reset
, ahci_skip_host_reset
, int, 0444);
41 MODULE_PARM_DESC(skip_host_reset
, "skip global host reset (0=don't skip, 1=skip)");
43 module_param_named(ignore_sss
, ahci_ignore_sss
, int, 0444);
44 MODULE_PARM_DESC(ignore_sss
, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
46 static int ahci_set_lpm(struct ata_link
*link
, enum ata_lpm_policy policy
,
48 static ssize_t
ahci_led_show(struct ata_port
*ap
, char *buf
);
49 static ssize_t
ahci_led_store(struct ata_port
*ap
, const char *buf
,
51 static ssize_t
ahci_transmit_led_message(struct ata_port
*ap
, u32 state
,
56 static int ahci_scr_read(struct ata_link
*link
, unsigned int sc_reg
, u32
*val
);
57 static int ahci_scr_write(struct ata_link
*link
, unsigned int sc_reg
, u32 val
);
58 static void ahci_qc_fill_rtf(struct ata_queued_cmd
*qc
);
59 static void ahci_qc_ncq_fill_rtf(struct ata_port
*ap
, u64 done_mask
);
60 static int ahci_port_start(struct ata_port
*ap
);
61 static void ahci_port_stop(struct ata_port
*ap
);
62 static enum ata_completion_errors
ahci_qc_prep(struct ata_queued_cmd
*qc
);
63 static int ahci_pmp_qc_defer(struct ata_queued_cmd
*qc
);
64 static void ahci_freeze(struct ata_port
*ap
);
65 static void ahci_thaw(struct ata_port
*ap
);
66 static void ahci_set_aggressive_devslp(struct ata_port
*ap
, bool sleep
);
67 static void ahci_enable_fbs(struct ata_port
*ap
);
68 static void ahci_disable_fbs(struct ata_port
*ap
);
69 static void ahci_pmp_attach(struct ata_port
*ap
);
70 static void ahci_pmp_detach(struct ata_port
*ap
);
71 static int ahci_softreset(struct ata_link
*link
, unsigned int *class,
72 unsigned long deadline
);
73 static int ahci_pmp_retry_softreset(struct ata_link
*link
, unsigned int *class,
74 unsigned long deadline
);
75 static int ahci_hardreset(struct ata_link
*link
, unsigned int *class,
76 unsigned long deadline
);
77 static void ahci_postreset(struct ata_link
*link
, unsigned int *class);
78 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
);
79 static void ahci_dev_config(struct ata_device
*dev
);
81 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
);
83 static ssize_t
ahci_activity_show(struct ata_device
*dev
, char *buf
);
84 static ssize_t
ahci_activity_store(struct ata_device
*dev
,
85 enum sw_activity val
);
86 static void ahci_init_sw_activity(struct ata_link
*link
);
88 static ssize_t
ahci_show_host_caps(struct device
*dev
,
89 struct device_attribute
*attr
, char *buf
);
90 static ssize_t
ahci_show_host_cap2(struct device
*dev
,
91 struct device_attribute
*attr
, char *buf
);
92 static ssize_t
ahci_show_host_version(struct device
*dev
,
93 struct device_attribute
*attr
, char *buf
);
94 static ssize_t
ahci_show_port_cmd(struct device
*dev
,
95 struct device_attribute
*attr
, char *buf
);
96 static ssize_t
ahci_read_em_buffer(struct device
*dev
,
97 struct device_attribute
*attr
, char *buf
);
98 static ssize_t
ahci_store_em_buffer(struct device
*dev
,
99 struct device_attribute
*attr
,
100 const char *buf
, size_t size
);
101 static ssize_t
ahci_show_em_supported(struct device
*dev
,
102 struct device_attribute
*attr
, char *buf
);
103 static irqreturn_t
ahci_single_level_irq_intr(int irq
, void *dev_instance
);
105 static DEVICE_ATTR(ahci_host_caps
, S_IRUGO
, ahci_show_host_caps
, NULL
);
106 static DEVICE_ATTR(ahci_host_cap2
, S_IRUGO
, ahci_show_host_cap2
, NULL
);
107 static DEVICE_ATTR(ahci_host_version
, S_IRUGO
, ahci_show_host_version
, NULL
);
108 static DEVICE_ATTR(ahci_port_cmd
, S_IRUGO
, ahci_show_port_cmd
, NULL
);
109 static DEVICE_ATTR(em_buffer
, S_IWUSR
| S_IRUGO
,
110 ahci_read_em_buffer
, ahci_store_em_buffer
);
111 static DEVICE_ATTR(em_message_supported
, S_IRUGO
, ahci_show_em_supported
, NULL
);
113 static struct attribute
*ahci_shost_attrs
[] = {
114 &dev_attr_link_power_management_policy
.attr
,
115 &dev_attr_em_message_type
.attr
,
116 &dev_attr_em_message
.attr
,
117 &dev_attr_ahci_host_caps
.attr
,
118 &dev_attr_ahci_host_cap2
.attr
,
119 &dev_attr_ahci_host_version
.attr
,
120 &dev_attr_ahci_port_cmd
.attr
,
121 &dev_attr_em_buffer
.attr
,
122 &dev_attr_em_message_supported
.attr
,
126 static const struct attribute_group ahci_shost_attr_group
= {
127 .attrs
= ahci_shost_attrs
130 const struct attribute_group
*ahci_shost_groups
[] = {
131 &ahci_shost_attr_group
,
134 EXPORT_SYMBOL_GPL(ahci_shost_groups
);
136 static struct attribute
*ahci_sdev_attrs
[] = {
137 &dev_attr_sw_activity
.attr
,
138 &dev_attr_unload_heads
.attr
,
139 &dev_attr_ncq_prio_supported
.attr
,
140 &dev_attr_ncq_prio_enable
.attr
,
144 static const struct attribute_group ahci_sdev_attr_group
= {
145 .attrs
= ahci_sdev_attrs
148 const struct attribute_group
*ahci_sdev_groups
[] = {
149 &ahci_sdev_attr_group
,
152 EXPORT_SYMBOL_GPL(ahci_sdev_groups
);
154 struct ata_port_operations ahci_ops
= {
155 .inherits
= &sata_pmp_port_ops
,
157 .qc_defer
= ahci_pmp_qc_defer
,
158 .qc_prep
= ahci_qc_prep
,
159 .qc_issue
= ahci_qc_issue
,
160 .qc_fill_rtf
= ahci_qc_fill_rtf
,
161 .qc_ncq_fill_rtf
= ahci_qc_ncq_fill_rtf
,
163 .freeze
= ahci_freeze
,
165 .softreset
= ahci_softreset
,
166 .hardreset
= ahci_hardreset
,
167 .postreset
= ahci_postreset
,
168 .pmp_softreset
= ahci_softreset
,
169 .error_handler
= ahci_error_handler
,
170 .post_internal_cmd
= ahci_post_internal_cmd
,
171 .dev_config
= ahci_dev_config
,
173 .scr_read
= ahci_scr_read
,
174 .scr_write
= ahci_scr_write
,
175 .pmp_attach
= ahci_pmp_attach
,
176 .pmp_detach
= ahci_pmp_detach
,
178 .set_lpm
= ahci_set_lpm
,
179 .em_show
= ahci_led_show
,
180 .em_store
= ahci_led_store
,
181 .sw_activity_show
= ahci_activity_show
,
182 .sw_activity_store
= ahci_activity_store
,
183 .transmit_led_message
= ahci_transmit_led_message
,
185 .port_suspend
= ahci_port_suspend
,
186 .port_resume
= ahci_port_resume
,
188 .port_start
= ahci_port_start
,
189 .port_stop
= ahci_port_stop
,
191 EXPORT_SYMBOL_GPL(ahci_ops
);
193 struct ata_port_operations ahci_pmp_retry_srst_ops
= {
194 .inherits
= &ahci_ops
,
195 .softreset
= ahci_pmp_retry_softreset
,
197 EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops
);
199 static bool ahci_em_messages __read_mostly
= true;
200 module_param(ahci_em_messages
, bool, 0444);
201 /* add other LED protocol types when they become supported */
202 MODULE_PARM_DESC(ahci_em_messages
,
203 "AHCI Enclosure Management Message control (0 = off, 1 = on)");
205 /* device sleep idle timeout in ms */
206 static int devslp_idle_timeout __read_mostly
= 1000;
207 module_param(devslp_idle_timeout
, int, 0644);
208 MODULE_PARM_DESC(devslp_idle_timeout
, "device sleep idle timeout");
210 static void ahci_enable_ahci(void __iomem
*mmio
)
215 /* turn on AHCI_EN */
216 tmp
= readl(mmio
+ HOST_CTL
);
217 if (tmp
& HOST_AHCI_EN
)
220 /* Some controllers need AHCI_EN to be written multiple times.
221 * Try a few times before giving up.
223 for (i
= 0; i
< 5; i
++) {
225 writel(tmp
, mmio
+ HOST_CTL
);
226 tmp
= readl(mmio
+ HOST_CTL
); /* flush && sanity check */
227 if (tmp
& HOST_AHCI_EN
)
236 * ahci_rpm_get_port - Make sure the port is powered on
237 * @ap: Port to power on
239 * Whenever there is need to access the AHCI host registers outside of
240 * normal execution paths, call this function to make sure the host is
241 * actually powered on.
243 static int ahci_rpm_get_port(struct ata_port
*ap
)
245 return pm_runtime_get_sync(ap
->dev
);
249 * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
250 * @ap: Port to power down
252 * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
253 * if it has no more active users.
255 static void ahci_rpm_put_port(struct ata_port
*ap
)
257 pm_runtime_put(ap
->dev
);
260 static ssize_t
ahci_show_host_caps(struct device
*dev
,
261 struct device_attribute
*attr
, char *buf
)
263 struct Scsi_Host
*shost
= class_to_shost(dev
);
264 struct ata_port
*ap
= ata_shost_to_port(shost
);
265 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
267 return sprintf(buf
, "%x\n", hpriv
->cap
);
270 static ssize_t
ahci_show_host_cap2(struct device
*dev
,
271 struct device_attribute
*attr
, char *buf
)
273 struct Scsi_Host
*shost
= class_to_shost(dev
);
274 struct ata_port
*ap
= ata_shost_to_port(shost
);
275 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
277 return sprintf(buf
, "%x\n", hpriv
->cap2
);
280 static ssize_t
ahci_show_host_version(struct device
*dev
,
281 struct device_attribute
*attr
, char *buf
)
283 struct Scsi_Host
*shost
= class_to_shost(dev
);
284 struct ata_port
*ap
= ata_shost_to_port(shost
);
285 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
287 return sprintf(buf
, "%x\n", hpriv
->version
);
290 static ssize_t
ahci_show_port_cmd(struct device
*dev
,
291 struct device_attribute
*attr
, char *buf
)
293 struct Scsi_Host
*shost
= class_to_shost(dev
);
294 struct ata_port
*ap
= ata_shost_to_port(shost
);
295 void __iomem
*port_mmio
= ahci_port_base(ap
);
298 ahci_rpm_get_port(ap
);
299 ret
= sprintf(buf
, "%x\n", readl(port_mmio
+ PORT_CMD
));
300 ahci_rpm_put_port(ap
);
305 static ssize_t
ahci_read_em_buffer(struct device
*dev
,
306 struct device_attribute
*attr
, char *buf
)
308 struct Scsi_Host
*shost
= class_to_shost(dev
);
309 struct ata_port
*ap
= ata_shost_to_port(shost
);
310 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
311 void __iomem
*mmio
= hpriv
->mmio
;
312 void __iomem
*em_mmio
= mmio
+ hpriv
->em_loc
;
318 ahci_rpm_get_port(ap
);
319 spin_lock_irqsave(ap
->lock
, flags
);
321 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
322 if (!(ap
->flags
& ATA_FLAG_EM
) || em_ctl
& EM_CTL_XMT
||
323 !(hpriv
->em_msg_type
& EM_MSG_TYPE_SGPIO
)) {
324 spin_unlock_irqrestore(ap
->lock
, flags
);
325 ahci_rpm_put_port(ap
);
329 if (!(em_ctl
& EM_CTL_MR
)) {
330 spin_unlock_irqrestore(ap
->lock
, flags
);
331 ahci_rpm_put_port(ap
);
335 if (!(em_ctl
& EM_CTL_SMB
))
336 em_mmio
+= hpriv
->em_buf_sz
;
338 count
= hpriv
->em_buf_sz
;
340 /* the count should not be larger than PAGE_SIZE */
341 if (count
> PAGE_SIZE
) {
342 if (printk_ratelimit())
344 "EM read buffer size too large: "
345 "buffer size %u, page size %lu\n",
346 hpriv
->em_buf_sz
, PAGE_SIZE
);
350 for (i
= 0; i
< count
; i
+= 4) {
351 msg
= readl(em_mmio
+ i
);
353 buf
[i
+ 1] = (msg
>> 8) & 0xff;
354 buf
[i
+ 2] = (msg
>> 16) & 0xff;
355 buf
[i
+ 3] = (msg
>> 24) & 0xff;
358 spin_unlock_irqrestore(ap
->lock
, flags
);
359 ahci_rpm_put_port(ap
);
364 static ssize_t
ahci_store_em_buffer(struct device
*dev
,
365 struct device_attribute
*attr
,
366 const char *buf
, size_t size
)
368 struct Scsi_Host
*shost
= class_to_shost(dev
);
369 struct ata_port
*ap
= ata_shost_to_port(shost
);
370 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
371 void __iomem
*mmio
= hpriv
->mmio
;
372 void __iomem
*em_mmio
= mmio
+ hpriv
->em_loc
;
373 const unsigned char *msg_buf
= buf
;
378 /* check size validity */
379 if (!(ap
->flags
& ATA_FLAG_EM
) ||
380 !(hpriv
->em_msg_type
& EM_MSG_TYPE_SGPIO
) ||
381 size
% 4 || size
> hpriv
->em_buf_sz
)
384 ahci_rpm_get_port(ap
);
385 spin_lock_irqsave(ap
->lock
, flags
);
387 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
388 if (em_ctl
& EM_CTL_TM
) {
389 spin_unlock_irqrestore(ap
->lock
, flags
);
390 ahci_rpm_put_port(ap
);
394 for (i
= 0; i
< size
; i
+= 4) {
395 msg
= msg_buf
[i
] | msg_buf
[i
+ 1] << 8 |
396 msg_buf
[i
+ 2] << 16 | msg_buf
[i
+ 3] << 24;
397 writel(msg
, em_mmio
+ i
);
400 writel(em_ctl
| EM_CTL_TM
, mmio
+ HOST_EM_CTL
);
402 spin_unlock_irqrestore(ap
->lock
, flags
);
403 ahci_rpm_put_port(ap
);
408 static ssize_t
ahci_show_em_supported(struct device
*dev
,
409 struct device_attribute
*attr
, char *buf
)
411 struct Scsi_Host
*shost
= class_to_shost(dev
);
412 struct ata_port
*ap
= ata_shost_to_port(shost
);
413 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
414 void __iomem
*mmio
= hpriv
->mmio
;
417 ahci_rpm_get_port(ap
);
418 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
419 ahci_rpm_put_port(ap
);
421 return sprintf(buf
, "%s%s%s%s\n",
422 em_ctl
& EM_CTL_LED
? "led " : "",
423 em_ctl
& EM_CTL_SAFTE
? "saf-te " : "",
424 em_ctl
& EM_CTL_SES
? "ses-2 " : "",
425 em_ctl
& EM_CTL_SGPIO
? "sgpio " : "");
429 * ahci_save_initial_config - Save and fixup initial config values
430 * @dev: target AHCI device
431 * @hpriv: host private area to store config values
433 * Some registers containing configuration info might be setup by
434 * BIOS and might be cleared on reset. This function saves the
435 * initial values of those registers into @hpriv such that they
436 * can be restored after controller reset.
438 * If inconsistent, config values are fixed up by this function.
440 * If it is not set already this function sets hpriv->start_engine to
446 void ahci_save_initial_config(struct device
*dev
, struct ahci_host_priv
*hpriv
)
448 void __iomem
*mmio
= hpriv
->mmio
;
449 void __iomem
*port_mmio
;
450 unsigned long port_map
;
454 /* make sure AHCI mode is enabled before accessing CAP */
455 ahci_enable_ahci(mmio
);
458 * Values prefixed with saved_ are written back to the HBA and ports
459 * registers after reset. Values without are used for driver operation.
463 * Override HW-init HBA capability fields with the platform-specific
464 * values. The rest of the HBA capabilities are defined as Read-only
465 * and can't be modified in CSR anyway.
467 cap
= readl(mmio
+ HOST_CAP
);
468 if (hpriv
->saved_cap
)
469 cap
= (cap
& ~(HOST_CAP_SSS
| HOST_CAP_MPS
)) | hpriv
->saved_cap
;
470 hpriv
->saved_cap
= cap
;
472 /* CAP2 register is only defined for AHCI 1.2 and later */
473 vers
= readl(mmio
+ HOST_VERSION
);
474 if ((vers
>> 16) > 1 ||
475 ((vers
>> 16) == 1 && (vers
& 0xFFFF) >= 0x200))
476 hpriv
->saved_cap2
= cap2
= readl(mmio
+ HOST_CAP2
);
478 hpriv
->saved_cap2
= cap2
= 0;
480 /* some chips have errata preventing 64bit use */
481 if ((cap
& HOST_CAP_64
) && (hpriv
->flags
& AHCI_HFLAG_32BIT_ONLY
)) {
482 dev_info(dev
, "controller can't do 64bit DMA, forcing 32bit\n");
486 if ((cap
& HOST_CAP_NCQ
) && (hpriv
->flags
& AHCI_HFLAG_NO_NCQ
)) {
487 dev_info(dev
, "controller can't do NCQ, turning off CAP_NCQ\n");
488 cap
&= ~HOST_CAP_NCQ
;
491 if (!(cap
& HOST_CAP_NCQ
) && (hpriv
->flags
& AHCI_HFLAG_YES_NCQ
)) {
492 dev_info(dev
, "controller can do NCQ, turning on CAP_NCQ\n");
496 if ((cap
& HOST_CAP_PMP
) && (hpriv
->flags
& AHCI_HFLAG_NO_PMP
)) {
497 dev_info(dev
, "controller can't do PMP, turning off CAP_PMP\n");
498 cap
&= ~HOST_CAP_PMP
;
501 if ((cap
& HOST_CAP_SNTF
) && (hpriv
->flags
& AHCI_HFLAG_NO_SNTF
)) {
503 "controller can't do SNTF, turning off CAP_SNTF\n");
504 cap
&= ~HOST_CAP_SNTF
;
507 if ((cap2
& HOST_CAP2_SDS
) && (hpriv
->flags
& AHCI_HFLAG_NO_DEVSLP
)) {
509 "controller can't do DEVSLP, turning off\n");
510 cap2
&= ~HOST_CAP2_SDS
;
511 cap2
&= ~HOST_CAP2_SADM
;
514 if (!(cap
& HOST_CAP_FBS
) && (hpriv
->flags
& AHCI_HFLAG_YES_FBS
)) {
515 dev_info(dev
, "controller can do FBS, turning on CAP_FBS\n");
519 if ((cap
& HOST_CAP_FBS
) && (hpriv
->flags
& AHCI_HFLAG_NO_FBS
)) {
520 dev_info(dev
, "controller can't do FBS, turning off CAP_FBS\n");
521 cap
&= ~HOST_CAP_FBS
;
524 if (!(cap
& HOST_CAP_ALPM
) && (hpriv
->flags
& AHCI_HFLAG_YES_ALPM
)) {
525 dev_info(dev
, "controller can do ALPM, turning on CAP_ALPM\n");
526 cap
|= HOST_CAP_ALPM
;
529 if ((cap
& HOST_CAP_SXS
) && (hpriv
->flags
& AHCI_HFLAG_NO_SXS
)) {
530 dev_info(dev
, "controller does not support SXS, disabling CAP_SXS\n");
531 cap
&= ~HOST_CAP_SXS
;
534 /* Override the HBA ports mapping if the platform needs it */
535 port_map
= readl(mmio
+ HOST_PORTS_IMPL
);
536 if (hpriv
->saved_port_map
&& port_map
!= hpriv
->saved_port_map
) {
537 dev_info(dev
, "forcing port_map 0x%lx -> 0x%x\n",
538 port_map
, hpriv
->saved_port_map
);
539 port_map
= hpriv
->saved_port_map
;
541 hpriv
->saved_port_map
= port_map
;
544 if (hpriv
->mask_port_map
) {
545 dev_warn(dev
, "masking port_map 0x%lx -> 0x%lx\n",
547 port_map
& hpriv
->mask_port_map
);
548 port_map
&= hpriv
->mask_port_map
;
551 /* cross check port_map and cap.n_ports */
555 for (i
= 0; i
< AHCI_MAX_PORTS
; i
++)
556 if (port_map
& (1 << i
))
559 /* If PI has more ports than n_ports, whine, clear
560 * port_map and let it be generated from n_ports.
562 if (map_ports
> ahci_nr_ports(cap
)) {
564 "implemented port map (0x%lx) contains more ports than nr_ports (%u), using nr_ports\n",
565 port_map
, ahci_nr_ports(cap
));
570 /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
571 if (!port_map
&& vers
< 0x10300) {
572 port_map
= (1 << ahci_nr_ports(cap
)) - 1;
573 dev_warn(dev
, "forcing PORTS_IMPL to 0x%lx\n", port_map
);
575 /* write the fixed up value to the PI register */
576 hpriv
->saved_port_map
= port_map
;
580 * Preserve the ports capabilities defined by the platform. Note there
581 * is no need in storing the rest of the P#.CMD fields since they are
584 for_each_set_bit(i
, &port_map
, AHCI_MAX_PORTS
) {
585 if (hpriv
->saved_port_cap
[i
])
588 port_mmio
= __ahci_port_base(hpriv
, i
);
589 hpriv
->saved_port_cap
[i
] =
590 readl(port_mmio
+ PORT_CMD
) & PORT_CMD_CAP
;
593 /* record values to use during operation */
596 hpriv
->version
= vers
;
597 hpriv
->port_map
= port_map
;
599 if (!hpriv
->start_engine
)
600 hpriv
->start_engine
= ahci_start_engine
;
602 if (!hpriv
->stop_engine
)
603 hpriv
->stop_engine
= ahci_stop_engine
;
605 if (!hpriv
->irq_handler
)
606 hpriv
->irq_handler
= ahci_single_level_irq_intr
;
608 EXPORT_SYMBOL_GPL(ahci_save_initial_config
);
611 * ahci_restore_initial_config - Restore initial config
612 * @host: target ATA host
614 * Restore initial config stored by ahci_save_initial_config().
619 static void ahci_restore_initial_config(struct ata_host
*host
)
621 struct ahci_host_priv
*hpriv
= host
->private_data
;
622 unsigned long port_map
= hpriv
->port_map
;
623 void __iomem
*mmio
= hpriv
->mmio
;
624 void __iomem
*port_mmio
;
627 writel(hpriv
->saved_cap
, mmio
+ HOST_CAP
);
628 if (hpriv
->saved_cap2
)
629 writel(hpriv
->saved_cap2
, mmio
+ HOST_CAP2
);
630 writel(hpriv
->saved_port_map
, mmio
+ HOST_PORTS_IMPL
);
631 (void) readl(mmio
+ HOST_PORTS_IMPL
); /* flush */
633 for_each_set_bit(i
, &port_map
, AHCI_MAX_PORTS
) {
634 port_mmio
= __ahci_port_base(hpriv
, i
);
635 writel(hpriv
->saved_port_cap
[i
], port_mmio
+ PORT_CMD
);
639 static unsigned ahci_scr_offset(struct ata_port
*ap
, unsigned int sc_reg
)
641 static const int offset
[] = {
642 [SCR_STATUS
] = PORT_SCR_STAT
,
643 [SCR_CONTROL
] = PORT_SCR_CTL
,
644 [SCR_ERROR
] = PORT_SCR_ERR
,
645 [SCR_ACTIVE
] = PORT_SCR_ACT
,
646 [SCR_NOTIFICATION
] = PORT_SCR_NTF
,
648 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
650 if (sc_reg
< ARRAY_SIZE(offset
) &&
651 (sc_reg
!= SCR_NOTIFICATION
|| (hpriv
->cap
& HOST_CAP_SNTF
)))
652 return offset
[sc_reg
];
656 static int ahci_scr_read(struct ata_link
*link
, unsigned int sc_reg
, u32
*val
)
658 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
659 int offset
= ahci_scr_offset(link
->ap
, sc_reg
);
662 *val
= readl(port_mmio
+ offset
);
668 static int ahci_scr_write(struct ata_link
*link
, unsigned int sc_reg
, u32 val
)
670 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
671 int offset
= ahci_scr_offset(link
->ap
, sc_reg
);
674 writel(val
, port_mmio
+ offset
);
680 void ahci_start_engine(struct ata_port
*ap
)
682 void __iomem
*port_mmio
= ahci_port_base(ap
);
686 tmp
= readl(port_mmio
+ PORT_CMD
);
687 tmp
|= PORT_CMD_START
;
688 writel(tmp
, port_mmio
+ PORT_CMD
);
689 readl(port_mmio
+ PORT_CMD
); /* flush */
691 EXPORT_SYMBOL_GPL(ahci_start_engine
);
693 int ahci_stop_engine(struct ata_port
*ap
)
695 void __iomem
*port_mmio
= ahci_port_base(ap
);
696 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
700 * On some controllers, stopping a port's DMA engine while the port
701 * is in ALPM state (partial or slumber) results in failures on
702 * subsequent DMA engine starts. For those controllers, put the
703 * port back in active state before stopping its DMA engine.
705 if ((hpriv
->flags
& AHCI_HFLAG_WAKE_BEFORE_STOP
) &&
706 (ap
->link
.lpm_policy
> ATA_LPM_MAX_POWER
) &&
707 ahci_set_lpm(&ap
->link
, ATA_LPM_MAX_POWER
, ATA_LPM_WAKE_ONLY
)) {
708 dev_err(ap
->host
->dev
, "Failed to wake up port before engine stop\n");
712 tmp
= readl(port_mmio
+ PORT_CMD
);
714 /* check if the HBA is idle */
715 if ((tmp
& (PORT_CMD_START
| PORT_CMD_LIST_ON
)) == 0)
719 * Don't try to issue commands but return with ENODEV if the
720 * AHCI controller not available anymore (e.g. due to PCIe hot
721 * unplugging). Otherwise a 500ms delay for each port is added.
723 if (tmp
== 0xffffffff) {
724 dev_err(ap
->host
->dev
, "AHCI controller unavailable!\n");
728 /* setting HBA to idle */
729 tmp
&= ~PORT_CMD_START
;
730 writel(tmp
, port_mmio
+ PORT_CMD
);
732 /* wait for engine to stop. This could be as long as 500 msec */
733 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
,
734 PORT_CMD_LIST_ON
, PORT_CMD_LIST_ON
, 1, 500);
735 if (tmp
& PORT_CMD_LIST_ON
)
740 EXPORT_SYMBOL_GPL(ahci_stop_engine
);
742 void ahci_start_fis_rx(struct ata_port
*ap
)
744 void __iomem
*port_mmio
= ahci_port_base(ap
);
745 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
746 struct ahci_port_priv
*pp
= ap
->private_data
;
749 /* set FIS registers */
750 if (hpriv
->cap
& HOST_CAP_64
)
751 writel((pp
->cmd_slot_dma
>> 16) >> 16,
752 port_mmio
+ PORT_LST_ADDR_HI
);
753 writel(pp
->cmd_slot_dma
& 0xffffffff, port_mmio
+ PORT_LST_ADDR
);
755 if (hpriv
->cap
& HOST_CAP_64
)
756 writel((pp
->rx_fis_dma
>> 16) >> 16,
757 port_mmio
+ PORT_FIS_ADDR_HI
);
758 writel(pp
->rx_fis_dma
& 0xffffffff, port_mmio
+ PORT_FIS_ADDR
);
760 /* enable FIS reception */
761 tmp
= readl(port_mmio
+ PORT_CMD
);
762 tmp
|= PORT_CMD_FIS_RX
;
763 writel(tmp
, port_mmio
+ PORT_CMD
);
766 readl(port_mmio
+ PORT_CMD
);
768 EXPORT_SYMBOL_GPL(ahci_start_fis_rx
);
770 static int ahci_stop_fis_rx(struct ata_port
*ap
)
772 void __iomem
*port_mmio
= ahci_port_base(ap
);
775 /* disable FIS reception */
776 tmp
= readl(port_mmio
+ PORT_CMD
);
777 tmp
&= ~PORT_CMD_FIS_RX
;
778 writel(tmp
, port_mmio
+ PORT_CMD
);
780 /* wait for completion, spec says 500ms, give it 1000 */
781 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
, PORT_CMD_FIS_ON
,
782 PORT_CMD_FIS_ON
, 10, 1000);
783 if (tmp
& PORT_CMD_FIS_ON
)
789 static void ahci_power_up(struct ata_port
*ap
)
791 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
792 void __iomem
*port_mmio
= ahci_port_base(ap
);
795 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
798 if (hpriv
->cap
& HOST_CAP_SSS
) {
799 cmd
|= PORT_CMD_SPIN_UP
;
800 writel(cmd
, port_mmio
+ PORT_CMD
);
804 writel(cmd
| PORT_CMD_ICC_ACTIVE
, port_mmio
+ PORT_CMD
);
807 static int ahci_set_lpm(struct ata_link
*link
, enum ata_lpm_policy policy
,
810 struct ata_port
*ap
= link
->ap
;
811 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
812 struct ahci_port_priv
*pp
= ap
->private_data
;
813 void __iomem
*port_mmio
= ahci_port_base(ap
);
815 if (policy
!= ATA_LPM_MAX_POWER
) {
816 /* wakeup flag only applies to the max power policy */
817 hints
&= ~ATA_LPM_WAKE_ONLY
;
820 * Disable interrupts on Phy Ready. This keeps us from
821 * getting woken up due to spurious phy ready
824 pp
->intr_mask
&= ~PORT_IRQ_PHYRDY
;
825 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
827 sata_link_scr_lpm(link
, policy
, false);
830 if (hpriv
->cap
& HOST_CAP_ALPM
) {
831 u32 cmd
= readl(port_mmio
+ PORT_CMD
);
833 if (policy
== ATA_LPM_MAX_POWER
|| !(hints
& ATA_LPM_HIPM
)) {
834 if (!(hints
& ATA_LPM_WAKE_ONLY
))
835 cmd
&= ~(PORT_CMD_ASP
| PORT_CMD_ALPE
);
836 cmd
|= PORT_CMD_ICC_ACTIVE
;
838 writel(cmd
, port_mmio
+ PORT_CMD
);
839 readl(port_mmio
+ PORT_CMD
);
841 /* wait 10ms to be sure we've come out of LPM state */
844 if (hints
& ATA_LPM_WAKE_ONLY
)
847 cmd
|= PORT_CMD_ALPE
;
848 if (policy
== ATA_LPM_MIN_POWER
)
850 else if (policy
== ATA_LPM_MIN_POWER_WITH_PARTIAL
)
851 cmd
&= ~PORT_CMD_ASP
;
853 /* write out new cmd value */
854 writel(cmd
, port_mmio
+ PORT_CMD
);
858 /* set aggressive device sleep */
859 if ((hpriv
->cap2
& HOST_CAP2_SDS
) &&
860 (hpriv
->cap2
& HOST_CAP2_SADM
) &&
861 (link
->device
->flags
& ATA_DFLAG_DEVSLP
)) {
862 if (policy
== ATA_LPM_MIN_POWER
||
863 policy
== ATA_LPM_MIN_POWER_WITH_PARTIAL
)
864 ahci_set_aggressive_devslp(ap
, true);
866 ahci_set_aggressive_devslp(ap
, false);
869 if (policy
== ATA_LPM_MAX_POWER
) {
870 sata_link_scr_lpm(link
, policy
, false);
872 /* turn PHYRDY IRQ back on */
873 pp
->intr_mask
|= PORT_IRQ_PHYRDY
;
874 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
881 static void ahci_power_down(struct ata_port
*ap
)
883 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
884 void __iomem
*port_mmio
= ahci_port_base(ap
);
887 if (!(hpriv
->cap
& HOST_CAP_SSS
))
890 /* put device into listen mode, first set PxSCTL.DET to 0 */
891 scontrol
= readl(port_mmio
+ PORT_SCR_CTL
);
893 writel(scontrol
, port_mmio
+ PORT_SCR_CTL
);
895 /* then set PxCMD.SUD to 0 */
896 cmd
= readl(port_mmio
+ PORT_CMD
) & ~PORT_CMD_ICC_MASK
;
897 cmd
&= ~PORT_CMD_SPIN_UP
;
898 writel(cmd
, port_mmio
+ PORT_CMD
);
902 static void ahci_start_port(struct ata_port
*ap
)
904 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
905 struct ahci_port_priv
*pp
= ap
->private_data
;
906 struct ata_link
*link
;
907 struct ahci_em_priv
*emp
;
911 /* enable FIS reception */
912 ahci_start_fis_rx(ap
);
915 if (!(hpriv
->flags
& AHCI_HFLAG_DELAY_ENGINE
))
916 hpriv
->start_engine(ap
);
919 if (ap
->flags
& ATA_FLAG_EM
) {
920 ata_for_each_link(link
, ap
, EDGE
) {
921 emp
= &pp
->em_priv
[link
->pmp
];
923 /* EM Transmit bit maybe busy during init */
924 for (i
= 0; i
< EM_MAX_RETRY
; i
++) {
925 rc
= ap
->ops
->transmit_led_message(ap
,
929 * If busy, give a breather but do not
930 * release EH ownership by using msleep()
931 * instead of ata_msleep(). EM Transmit
932 * bit is busy for the whole host and
933 * releasing ownership will cause other
934 * ports to fail the same way.
944 if (ap
->flags
& ATA_FLAG_SW_ACTIVITY
)
945 ata_for_each_link(link
, ap
, EDGE
)
946 ahci_init_sw_activity(link
);
950 static int ahci_deinit_port(struct ata_port
*ap
, const char **emsg
)
953 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
956 rc
= hpriv
->stop_engine(ap
);
958 *emsg
= "failed to stop engine";
962 /* disable FIS reception */
963 rc
= ahci_stop_fis_rx(ap
);
965 *emsg
= "failed stop FIS RX";
972 int ahci_reset_controller(struct ata_host
*host
)
974 struct ahci_host_priv
*hpriv
= host
->private_data
;
975 void __iomem
*mmio
= hpriv
->mmio
;
979 * We must be in AHCI mode, before using anything AHCI-specific, such
982 ahci_enable_ahci(mmio
);
984 /* Global controller reset */
985 if (ahci_skip_host_reset
) {
986 dev_info(host
->dev
, "Skipping global host reset\n");
990 tmp
= readl(mmio
+ HOST_CTL
);
991 if (!(tmp
& HOST_RESET
)) {
992 writel(tmp
| HOST_RESET
, mmio
+ HOST_CTL
);
993 readl(mmio
+ HOST_CTL
); /* flush */
997 * To perform host reset, OS should set HOST_RESET and poll until this
998 * bit is read to be "0". Reset must complete within 1 second, or the
999 * hardware should be considered fried.
1001 tmp
= ata_wait_register(NULL
, mmio
+ HOST_CTL
, HOST_RESET
,
1002 HOST_RESET
, 10, 1000);
1003 if (tmp
& HOST_RESET
) {
1004 dev_err(host
->dev
, "Controller reset failed (0x%x)\n",
1009 /* Turn on AHCI mode */
1010 ahci_enable_ahci(mmio
);
1012 /* Some registers might be cleared on reset. Restore initial values. */
1013 if (!(hpriv
->flags
& AHCI_HFLAG_NO_WRITE_TO_RO
))
1014 ahci_restore_initial_config(host
);
1018 EXPORT_SYMBOL_GPL(ahci_reset_controller
);
1020 static void ahci_sw_activity(struct ata_link
*link
)
1022 struct ata_port
*ap
= link
->ap
;
1023 struct ahci_port_priv
*pp
= ap
->private_data
;
1024 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1026 if (!(link
->flags
& ATA_LFLAG_SW_ACTIVITY
))
1030 if (!timer_pending(&emp
->timer
))
1031 mod_timer(&emp
->timer
, jiffies
+ msecs_to_jiffies(10));
1034 static void ahci_sw_activity_blink(struct timer_list
*t
)
1036 struct ahci_em_priv
*emp
= from_timer(emp
, t
, timer
);
1037 struct ata_link
*link
= emp
->link
;
1038 struct ata_port
*ap
= link
->ap
;
1040 unsigned long led_message
= emp
->led_state
;
1041 u32 activity_led_state
;
1042 unsigned long flags
;
1044 led_message
&= EM_MSG_LED_VALUE
;
1045 led_message
|= ap
->port_no
| (link
->pmp
<< 8);
1047 /* check to see if we've had activity. If so,
1048 * toggle state of LED and reset timer. If not,
1049 * turn LED to desired idle state.
1051 spin_lock_irqsave(ap
->lock
, flags
);
1052 if (emp
->saved_activity
!= emp
->activity
) {
1053 emp
->saved_activity
= emp
->activity
;
1054 /* get the current LED state */
1055 activity_led_state
= led_message
& EM_MSG_LED_VALUE_ON
;
1057 if (activity_led_state
)
1058 activity_led_state
= 0;
1060 activity_led_state
= 1;
1062 /* clear old state */
1063 led_message
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1066 led_message
|= (activity_led_state
<< 16);
1067 mod_timer(&emp
->timer
, jiffies
+ msecs_to_jiffies(100));
1069 /* switch to idle */
1070 led_message
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1071 if (emp
->blink_policy
== BLINK_OFF
)
1072 led_message
|= (1 << 16);
1074 spin_unlock_irqrestore(ap
->lock
, flags
);
1075 ap
->ops
->transmit_led_message(ap
, led_message
, 4);
1078 static void ahci_init_sw_activity(struct ata_link
*link
)
1080 struct ata_port
*ap
= link
->ap
;
1081 struct ahci_port_priv
*pp
= ap
->private_data
;
1082 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1084 /* init activity stats, setup timer */
1085 emp
->saved_activity
= emp
->activity
= 0;
1087 timer_setup(&emp
->timer
, ahci_sw_activity_blink
, 0);
1089 /* check our blink policy and set flag for link if it's enabled */
1090 if (emp
->blink_policy
)
1091 link
->flags
|= ATA_LFLAG_SW_ACTIVITY
;
1094 int ahci_reset_em(struct ata_host
*host
)
1096 struct ahci_host_priv
*hpriv
= host
->private_data
;
1097 void __iomem
*mmio
= hpriv
->mmio
;
1100 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
1101 if ((em_ctl
& EM_CTL_TM
) || (em_ctl
& EM_CTL_RST
))
1104 writel(em_ctl
| EM_CTL_RST
, mmio
+ HOST_EM_CTL
);
1107 EXPORT_SYMBOL_GPL(ahci_reset_em
);
1109 static ssize_t
ahci_transmit_led_message(struct ata_port
*ap
, u32 state
,
1112 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1113 struct ahci_port_priv
*pp
= ap
->private_data
;
1114 void __iomem
*mmio
= hpriv
->mmio
;
1116 u32 message
[] = {0, 0};
1117 unsigned long flags
;
1119 struct ahci_em_priv
*emp
;
1121 /* get the slot number from the message */
1122 pmp
= (state
& EM_MSG_LED_PMP_SLOT
) >> 8;
1123 if (pmp
< EM_MAX_SLOTS
)
1124 emp
= &pp
->em_priv
[pmp
];
1128 ahci_rpm_get_port(ap
);
1129 spin_lock_irqsave(ap
->lock
, flags
);
1132 * if we are still busy transmitting a previous message,
1135 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
1136 if (em_ctl
& EM_CTL_TM
) {
1137 spin_unlock_irqrestore(ap
->lock
, flags
);
1138 ahci_rpm_put_port(ap
);
1142 if (hpriv
->em_msg_type
& EM_MSG_TYPE_LED
) {
1144 * create message header - this is all zero except for
1145 * the message size, which is 4 bytes.
1147 message
[0] |= (4 << 8);
1149 /* ignore 0:4 of byte zero, fill in port info yourself */
1150 message
[1] = ((state
& ~EM_MSG_LED_HBA_PORT
) | ap
->port_no
);
1152 /* write message to EM_LOC */
1153 writel(message
[0], mmio
+ hpriv
->em_loc
);
1154 writel(message
[1], mmio
+ hpriv
->em_loc
+4);
1157 * tell hardware to transmit the message
1159 writel(em_ctl
| EM_CTL_TM
, mmio
+ HOST_EM_CTL
);
1162 /* save off new led state for port/slot */
1163 emp
->led_state
= state
;
1165 spin_unlock_irqrestore(ap
->lock
, flags
);
1166 ahci_rpm_put_port(ap
);
1171 static ssize_t
ahci_led_show(struct ata_port
*ap
, char *buf
)
1173 struct ahci_port_priv
*pp
= ap
->private_data
;
1174 struct ata_link
*link
;
1175 struct ahci_em_priv
*emp
;
1178 ata_for_each_link(link
, ap
, EDGE
) {
1179 emp
= &pp
->em_priv
[link
->pmp
];
1180 rc
+= sprintf(buf
, "%lx\n", emp
->led_state
);
1185 static ssize_t
ahci_led_store(struct ata_port
*ap
, const char *buf
,
1190 struct ahci_port_priv
*pp
= ap
->private_data
;
1191 struct ahci_em_priv
*emp
;
1193 if (kstrtouint(buf
, 0, &state
) < 0)
1196 /* get the slot number from the message */
1197 pmp
= (state
& EM_MSG_LED_PMP_SLOT
) >> 8;
1198 if (pmp
< EM_MAX_SLOTS
) {
1199 pmp
= array_index_nospec(pmp
, EM_MAX_SLOTS
);
1200 emp
= &pp
->em_priv
[pmp
];
1205 /* mask off the activity bits if we are in sw_activity
1206 * mode, user should turn off sw_activity before setting
1207 * activity led through em_message
1209 if (emp
->blink_policy
)
1210 state
&= ~EM_MSG_LED_VALUE_ACTIVITY
;
1212 return ap
->ops
->transmit_led_message(ap
, state
, size
);
1215 static ssize_t
ahci_activity_store(struct ata_device
*dev
, enum sw_activity val
)
1217 struct ata_link
*link
= dev
->link
;
1218 struct ata_port
*ap
= link
->ap
;
1219 struct ahci_port_priv
*pp
= ap
->private_data
;
1220 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1221 u32 port_led_state
= emp
->led_state
;
1223 /* save the desired Activity LED behavior */
1226 link
->flags
&= ~(ATA_LFLAG_SW_ACTIVITY
);
1228 /* set the LED to OFF */
1229 port_led_state
&= EM_MSG_LED_VALUE_OFF
;
1230 port_led_state
|= (ap
->port_no
| (link
->pmp
<< 8));
1231 ap
->ops
->transmit_led_message(ap
, port_led_state
, 4);
1233 link
->flags
|= ATA_LFLAG_SW_ACTIVITY
;
1234 if (val
== BLINK_OFF
) {
1235 /* set LED to ON for idle */
1236 port_led_state
&= EM_MSG_LED_VALUE_OFF
;
1237 port_led_state
|= (ap
->port_no
| (link
->pmp
<< 8));
1238 port_led_state
|= EM_MSG_LED_VALUE_ON
; /* check this */
1239 ap
->ops
->transmit_led_message(ap
, port_led_state
, 4);
1242 emp
->blink_policy
= val
;
1246 static ssize_t
ahci_activity_show(struct ata_device
*dev
, char *buf
)
1248 struct ata_link
*link
= dev
->link
;
1249 struct ata_port
*ap
= link
->ap
;
1250 struct ahci_port_priv
*pp
= ap
->private_data
;
1251 struct ahci_em_priv
*emp
= &pp
->em_priv
[link
->pmp
];
1253 /* display the saved value of activity behavior for this
1256 return sprintf(buf
, "%d\n", emp
->blink_policy
);
1259 static void ahci_port_clear_pending_irq(struct ata_port
*ap
)
1261 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1262 void __iomem
*port_mmio
= ahci_port_base(ap
);
1266 tmp
= readl(port_mmio
+ PORT_SCR_ERR
);
1267 dev_dbg(ap
->host
->dev
, "PORT_SCR_ERR 0x%x\n", tmp
);
1268 writel(tmp
, port_mmio
+ PORT_SCR_ERR
);
1270 /* clear port IRQ */
1271 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
1272 dev_dbg(ap
->host
->dev
, "PORT_IRQ_STAT 0x%x\n", tmp
);
1274 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
1276 writel(1 << ap
->port_no
, hpriv
->mmio
+ HOST_IRQ_STAT
);
1279 static void ahci_port_init(struct device
*dev
, struct ata_port
*ap
,
1280 int port_no
, void __iomem
*mmio
,
1281 void __iomem
*port_mmio
)
1283 const char *emsg
= NULL
;
1286 /* make sure port is not active */
1287 rc
= ahci_deinit_port(ap
, &emsg
);
1289 dev_warn(dev
, "%s (%d)\n", emsg
, rc
);
1291 ahci_port_clear_pending_irq(ap
);
1294 void ahci_init_controller(struct ata_host
*host
)
1296 struct ahci_host_priv
*hpriv
= host
->private_data
;
1297 void __iomem
*mmio
= hpriv
->mmio
;
1299 void __iomem
*port_mmio
;
1302 for (i
= 0; i
< host
->n_ports
; i
++) {
1303 struct ata_port
*ap
= host
->ports
[i
];
1305 port_mmio
= ahci_port_base(ap
);
1306 if (ata_port_is_dummy(ap
))
1309 ahci_port_init(host
->dev
, ap
, i
, mmio
, port_mmio
);
1312 tmp
= readl(mmio
+ HOST_CTL
);
1313 dev_dbg(host
->dev
, "HOST_CTL 0x%x\n", tmp
);
1314 writel(tmp
| HOST_IRQ_EN
, mmio
+ HOST_CTL
);
1315 tmp
= readl(mmio
+ HOST_CTL
);
1316 dev_dbg(host
->dev
, "HOST_CTL 0x%x\n", tmp
);
1318 EXPORT_SYMBOL_GPL(ahci_init_controller
);
1320 static void ahci_dev_config(struct ata_device
*dev
)
1322 struct ahci_host_priv
*hpriv
= dev
->link
->ap
->host
->private_data
;
1324 if (hpriv
->flags
& AHCI_HFLAG_SECT255
) {
1325 dev
->max_sectors
= 255;
1327 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1331 unsigned int ahci_dev_classify(struct ata_port
*ap
)
1333 void __iomem
*port_mmio
= ahci_port_base(ap
);
1334 struct ata_taskfile tf
;
1337 tmp
= readl(port_mmio
+ PORT_SIG
);
1338 tf
.lbah
= (tmp
>> 24) & 0xff;
1339 tf
.lbam
= (tmp
>> 16) & 0xff;
1340 tf
.lbal
= (tmp
>> 8) & 0xff;
1341 tf
.nsect
= (tmp
) & 0xff;
1343 return ata_port_classify(ap
, &tf
);
1345 EXPORT_SYMBOL_GPL(ahci_dev_classify
);
1347 void ahci_fill_cmd_slot(struct ahci_port_priv
*pp
, unsigned int tag
,
1350 dma_addr_t cmd_tbl_dma
;
1352 cmd_tbl_dma
= pp
->cmd_tbl_dma
+ tag
* AHCI_CMD_TBL_SZ
;
1354 pp
->cmd_slot
[tag
].opts
= cpu_to_le32(opts
);
1355 pp
->cmd_slot
[tag
].status
= 0;
1356 pp
->cmd_slot
[tag
].tbl_addr
= cpu_to_le32(cmd_tbl_dma
& 0xffffffff);
1357 pp
->cmd_slot
[tag
].tbl_addr_hi
= cpu_to_le32((cmd_tbl_dma
>> 16) >> 16);
1359 EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot
);
1361 int ahci_kick_engine(struct ata_port
*ap
)
1363 void __iomem
*port_mmio
= ahci_port_base(ap
);
1364 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1365 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1370 rc
= hpriv
->stop_engine(ap
);
1375 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1377 busy
= status
& (ATA_BUSY
| ATA_DRQ
);
1378 if (!busy
&& !sata_pmp_attached(ap
)) {
1383 if (!(hpriv
->cap
& HOST_CAP_CLO
)) {
1389 tmp
= readl(port_mmio
+ PORT_CMD
);
1390 tmp
|= PORT_CMD_CLO
;
1391 writel(tmp
, port_mmio
+ PORT_CMD
);
1394 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD
,
1395 PORT_CMD_CLO
, PORT_CMD_CLO
, 1, 500);
1396 if (tmp
& PORT_CMD_CLO
)
1399 /* restart engine */
1401 hpriv
->start_engine(ap
);
1404 EXPORT_SYMBOL_GPL(ahci_kick_engine
);
1406 static int ahci_exec_polled_cmd(struct ata_port
*ap
, int pmp
,
1407 struct ata_taskfile
*tf
, int is_cmd
, u16 flags
,
1408 unsigned int timeout_msec
)
1410 const u32 cmd_fis_len
= 5; /* five dwords */
1411 struct ahci_port_priv
*pp
= ap
->private_data
;
1412 void __iomem
*port_mmio
= ahci_port_base(ap
);
1413 u8
*fis
= pp
->cmd_tbl
;
1416 /* prep the command */
1417 ata_tf_to_fis(tf
, pmp
, is_cmd
, fis
);
1418 ahci_fill_cmd_slot(pp
, 0, cmd_fis_len
| flags
| (pmp
<< 12));
1420 /* set port value for softreset of Port Multiplier */
1421 if (pp
->fbs_enabled
&& pp
->fbs_last_dev
!= pmp
) {
1422 tmp
= readl(port_mmio
+ PORT_FBS
);
1423 tmp
&= ~(PORT_FBS_DEV_MASK
| PORT_FBS_DEC
);
1424 tmp
|= pmp
<< PORT_FBS_DEV_OFFSET
;
1425 writel(tmp
, port_mmio
+ PORT_FBS
);
1426 pp
->fbs_last_dev
= pmp
;
1430 writel(1, port_mmio
+ PORT_CMD_ISSUE
);
1433 tmp
= ata_wait_register(ap
, port_mmio
+ PORT_CMD_ISSUE
,
1434 0x1, 0x1, 1, timeout_msec
);
1436 ahci_kick_engine(ap
);
1440 readl(port_mmio
+ PORT_CMD_ISSUE
); /* flush */
1445 int ahci_do_softreset(struct ata_link
*link
, unsigned int *class,
1446 int pmp
, unsigned long deadline
,
1447 int (*check_ready
)(struct ata_link
*link
))
1449 struct ata_port
*ap
= link
->ap
;
1450 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1451 struct ahci_port_priv
*pp
= ap
->private_data
;
1452 const char *reason
= NULL
;
1455 struct ata_taskfile tf
;
1456 bool fbs_disabled
= false;
1459 /* prepare for SRST (AHCI-1.1 10.4.1) */
1460 rc
= ahci_kick_engine(ap
);
1461 if (rc
&& rc
!= -EOPNOTSUPP
)
1462 ata_link_warn(link
, "failed to reset engine (errno=%d)\n", rc
);
1465 * According to AHCI-1.2 9.3.9: if FBS is enable, software shall
1466 * clear PxFBS.EN to '0' prior to issuing software reset to devices
1467 * that is attached to port multiplier.
1469 if (!ata_is_host_link(link
) && pp
->fbs_enabled
) {
1470 ahci_disable_fbs(ap
);
1471 fbs_disabled
= true;
1474 ata_tf_init(link
->device
, &tf
);
1476 /* issue the first H2D Register FIS */
1479 if (time_after(deadline
, now
))
1480 msecs
= jiffies_to_msecs(deadline
- now
);
1483 if (ahci_exec_polled_cmd(ap
, pmp
, &tf
, 0,
1484 AHCI_CMD_RESET
| AHCI_CMD_CLR_BUSY
, msecs
)) {
1486 reason
= "1st FIS failed";
1490 /* spec says at least 5us, but be generous and sleep for 1ms */
1493 /* issue the second H2D Register FIS */
1494 tf
.ctl
&= ~ATA_SRST
;
1495 ahci_exec_polled_cmd(ap
, pmp
, &tf
, 0, 0, 0);
1497 /* wait for link to become ready */
1498 rc
= ata_wait_after_reset(link
, deadline
, check_ready
);
1499 if (rc
== -EBUSY
&& hpriv
->flags
& AHCI_HFLAG_SRST_TOUT_IS_OFFLINE
) {
1501 * Workaround for cases where link online status can't
1502 * be trusted. Treat device readiness timeout as link
1505 ata_link_info(link
, "device not ready, treating as offline\n");
1506 *class = ATA_DEV_NONE
;
1508 /* link occupied, -ENODEV too is an error */
1509 reason
= "device not ready";
1512 *class = ahci_dev_classify(ap
);
1514 /* re-enable FBS if disabled before */
1516 ahci_enable_fbs(ap
);
1521 ata_link_err(link
, "softreset failed (%s)\n", reason
);
1525 int ahci_check_ready(struct ata_link
*link
)
1527 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
1528 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1530 return ata_check_ready(status
);
1532 EXPORT_SYMBOL_GPL(ahci_check_ready
);
1534 static int ahci_softreset(struct ata_link
*link
, unsigned int *class,
1535 unsigned long deadline
)
1537 int pmp
= sata_srst_pmp(link
);
1539 return ahci_do_softreset(link
, class, pmp
, deadline
, ahci_check_ready
);
1541 EXPORT_SYMBOL_GPL(ahci_do_softreset
);
1543 static int ahci_bad_pmp_check_ready(struct ata_link
*link
)
1545 void __iomem
*port_mmio
= ahci_port_base(link
->ap
);
1546 u8 status
= readl(port_mmio
+ PORT_TFDATA
) & 0xFF;
1547 u32 irq_status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1550 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1551 * which can save timeout delay.
1553 if (irq_status
& PORT_IRQ_BAD_PMP
)
1556 return ata_check_ready(status
);
1559 static int ahci_pmp_retry_softreset(struct ata_link
*link
, unsigned int *class,
1560 unsigned long deadline
)
1562 struct ata_port
*ap
= link
->ap
;
1563 void __iomem
*port_mmio
= ahci_port_base(ap
);
1564 int pmp
= sata_srst_pmp(link
);
1568 rc
= ahci_do_softreset(link
, class, pmp
, deadline
,
1569 ahci_bad_pmp_check_ready
);
1572 * Soft reset fails with IPMS set when PMP is enabled but
1573 * SATA HDD/ODD is connected to SATA port, do soft reset
1577 irq_sts
= readl(port_mmio
+ PORT_IRQ_STAT
);
1578 if (irq_sts
& PORT_IRQ_BAD_PMP
) {
1580 "applying PMP SRST workaround "
1582 rc
= ahci_do_softreset(link
, class, 0, deadline
,
1590 int ahci_do_hardreset(struct ata_link
*link
, unsigned int *class,
1591 unsigned long deadline
, bool *online
)
1593 const unsigned int *timing
= sata_ehc_deb_timing(&link
->eh_context
);
1594 struct ata_port
*ap
= link
->ap
;
1595 struct ahci_port_priv
*pp
= ap
->private_data
;
1596 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1597 u8
*d2h_fis
= pp
->rx_fis
+ RX_FIS_D2H_REG
;
1598 struct ata_taskfile tf
;
1601 hpriv
->stop_engine(ap
);
1603 /* clear D2H reception area to properly wait for D2H FIS */
1604 ata_tf_init(link
->device
, &tf
);
1605 tf
.status
= ATA_BUSY
;
1606 ata_tf_to_fis(&tf
, 0, 0, d2h_fis
);
1608 ahci_port_clear_pending_irq(ap
);
1610 rc
= sata_link_hardreset(link
, timing
, deadline
, online
,
1613 hpriv
->start_engine(ap
);
1616 *class = ahci_dev_classify(ap
);
1620 EXPORT_SYMBOL_GPL(ahci_do_hardreset
);
1622 static int ahci_hardreset(struct ata_link
*link
, unsigned int *class,
1623 unsigned long deadline
)
1627 return ahci_do_hardreset(link
, class, deadline
, &online
);
1630 static void ahci_postreset(struct ata_link
*link
, unsigned int *class)
1632 struct ata_port
*ap
= link
->ap
;
1633 void __iomem
*port_mmio
= ahci_port_base(ap
);
1636 ata_std_postreset(link
, class);
1638 /* Make sure port's ATAPI bit is set appropriately */
1639 new_tmp
= tmp
= readl(port_mmio
+ PORT_CMD
);
1640 if (*class == ATA_DEV_ATAPI
)
1641 new_tmp
|= PORT_CMD_ATAPI
;
1643 new_tmp
&= ~PORT_CMD_ATAPI
;
1644 if (new_tmp
!= tmp
) {
1645 writel(new_tmp
, port_mmio
+ PORT_CMD
);
1646 readl(port_mmio
+ PORT_CMD
); /* flush */
1650 static unsigned int ahci_fill_sg(struct ata_queued_cmd
*qc
, void *cmd_tbl
)
1652 struct scatterlist
*sg
;
1653 struct ahci_sg
*ahci_sg
= cmd_tbl
+ AHCI_CMD_TBL_HDR_SZ
;
1657 * Next, the S/G list.
1659 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1660 dma_addr_t addr
= sg_dma_address(sg
);
1661 u32 sg_len
= sg_dma_len(sg
);
1663 ahci_sg
[si
].addr
= cpu_to_le32(addr
& 0xffffffff);
1664 ahci_sg
[si
].addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
1665 ahci_sg
[si
].flags_size
= cpu_to_le32(sg_len
- 1);
1671 static int ahci_pmp_qc_defer(struct ata_queued_cmd
*qc
)
1673 struct ata_port
*ap
= qc
->ap
;
1674 struct ahci_port_priv
*pp
= ap
->private_data
;
1676 if (!sata_pmp_attached(ap
) || pp
->fbs_enabled
)
1677 return ata_std_qc_defer(qc
);
1679 return sata_pmp_qc_defer_cmd_switch(qc
);
1682 static enum ata_completion_errors
ahci_qc_prep(struct ata_queued_cmd
*qc
)
1684 struct ata_port
*ap
= qc
->ap
;
1685 struct ahci_port_priv
*pp
= ap
->private_data
;
1686 int is_atapi
= ata_is_atapi(qc
->tf
.protocol
);
1689 const u32 cmd_fis_len
= 5; /* five dwords */
1690 unsigned int n_elem
;
1693 * Fill in command table information. First, the header,
1694 * a SATA Register - Host to Device command FIS.
1696 cmd_tbl
= pp
->cmd_tbl
+ qc
->hw_tag
* AHCI_CMD_TBL_SZ
;
1698 ata_tf_to_fis(&qc
->tf
, qc
->dev
->link
->pmp
, 1, cmd_tbl
);
1700 memset(cmd_tbl
+ AHCI_CMD_TBL_CDB
, 0, 32);
1701 memcpy(cmd_tbl
+ AHCI_CMD_TBL_CDB
, qc
->cdb
, qc
->dev
->cdb_len
);
1705 if (qc
->flags
& ATA_QCFLAG_DMAMAP
)
1706 n_elem
= ahci_fill_sg(qc
, cmd_tbl
);
1709 * Fill in command slot information.
1711 opts
= cmd_fis_len
| n_elem
<< 16 | (qc
->dev
->link
->pmp
<< 12);
1712 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
1713 opts
|= AHCI_CMD_WRITE
;
1715 opts
|= AHCI_CMD_ATAPI
| AHCI_CMD_PREFETCH
;
1717 ahci_fill_cmd_slot(pp
, qc
->hw_tag
, opts
);
1722 static void ahci_fbs_dec_intr(struct ata_port
*ap
)
1724 struct ahci_port_priv
*pp
= ap
->private_data
;
1725 void __iomem
*port_mmio
= ahci_port_base(ap
);
1726 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
1729 BUG_ON(!pp
->fbs_enabled
);
1731 /* time to wait for DEC is not specified by AHCI spec,
1732 * add a retry loop for safety.
1734 writel(fbs
| PORT_FBS_DEC
, port_mmio
+ PORT_FBS
);
1735 fbs
= readl(port_mmio
+ PORT_FBS
);
1736 while ((fbs
& PORT_FBS_DEC
) && retries
--) {
1738 fbs
= readl(port_mmio
+ PORT_FBS
);
1741 if (fbs
& PORT_FBS_DEC
)
1742 dev_err(ap
->host
->dev
, "failed to clear device error\n");
1745 static void ahci_error_intr(struct ata_port
*ap
, u32 irq_stat
)
1747 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1748 struct ahci_port_priv
*pp
= ap
->private_data
;
1749 struct ata_eh_info
*host_ehi
= &ap
->link
.eh_info
;
1750 struct ata_link
*link
= NULL
;
1751 struct ata_queued_cmd
*active_qc
;
1752 struct ata_eh_info
*active_ehi
;
1753 bool fbs_need_dec
= false;
1756 /* determine active link with error */
1757 if (pp
->fbs_enabled
) {
1758 void __iomem
*port_mmio
= ahci_port_base(ap
);
1759 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
1760 int pmp
= fbs
>> PORT_FBS_DWE_OFFSET
;
1762 if ((fbs
& PORT_FBS_SDE
) && (pmp
< ap
->nr_pmp_links
)) {
1763 link
= &ap
->pmp_link
[pmp
];
1764 fbs_need_dec
= true;
1768 ata_for_each_link(link
, ap
, EDGE
)
1769 if (ata_link_active(link
))
1775 active_qc
= ata_qc_from_tag(ap
, link
->active_tag
);
1776 active_ehi
= &link
->eh_info
;
1778 /* record irq stat */
1779 ata_ehi_clear_desc(host_ehi
);
1780 ata_ehi_push_desc(host_ehi
, "irq_stat 0x%08x", irq_stat
);
1782 /* AHCI needs SError cleared; otherwise, it might lock up */
1783 ahci_scr_read(&ap
->link
, SCR_ERROR
, &serror
);
1784 ahci_scr_write(&ap
->link
, SCR_ERROR
, serror
);
1785 host_ehi
->serror
|= serror
;
1787 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1788 if (hpriv
->flags
& AHCI_HFLAG_IGN_IRQ_IF_ERR
)
1789 irq_stat
&= ~PORT_IRQ_IF_ERR
;
1791 if (irq_stat
& PORT_IRQ_TF_ERR
) {
1792 /* If qc is active, charge it; otherwise, the active
1793 * link. There's no active qc on NCQ errors. It will
1794 * be determined by EH by reading log page 10h.
1797 active_qc
->err_mask
|= AC_ERR_DEV
;
1799 active_ehi
->err_mask
|= AC_ERR_DEV
;
1801 if (hpriv
->flags
& AHCI_HFLAG_IGN_SERR_INTERNAL
)
1802 host_ehi
->serror
&= ~SERR_INTERNAL
;
1805 if (irq_stat
& PORT_IRQ_UNK_FIS
) {
1806 u32
*unk
= pp
->rx_fis
+ RX_FIS_UNK
;
1808 active_ehi
->err_mask
|= AC_ERR_HSM
;
1809 active_ehi
->action
|= ATA_EH_RESET
;
1810 ata_ehi_push_desc(active_ehi
,
1811 "unknown FIS %08x %08x %08x %08x" ,
1812 unk
[0], unk
[1], unk
[2], unk
[3]);
1815 if (sata_pmp_attached(ap
) && (irq_stat
& PORT_IRQ_BAD_PMP
)) {
1816 active_ehi
->err_mask
|= AC_ERR_HSM
;
1817 active_ehi
->action
|= ATA_EH_RESET
;
1818 ata_ehi_push_desc(active_ehi
, "incorrect PMP");
1821 if (irq_stat
& (PORT_IRQ_HBUS_ERR
| PORT_IRQ_HBUS_DATA_ERR
)) {
1822 host_ehi
->err_mask
|= AC_ERR_HOST_BUS
;
1823 host_ehi
->action
|= ATA_EH_RESET
;
1824 ata_ehi_push_desc(host_ehi
, "host bus error");
1827 if (irq_stat
& PORT_IRQ_IF_ERR
) {
1829 active_ehi
->err_mask
|= AC_ERR_DEV
;
1831 host_ehi
->err_mask
|= AC_ERR_ATA_BUS
;
1832 host_ehi
->action
|= ATA_EH_RESET
;
1835 ata_ehi_push_desc(host_ehi
, "interface fatal error");
1838 if (irq_stat
& (PORT_IRQ_CONNECT
| PORT_IRQ_PHYRDY
)) {
1839 ata_ehi_hotplugged(host_ehi
);
1840 ata_ehi_push_desc(host_ehi
, "%s",
1841 irq_stat
& PORT_IRQ_CONNECT
?
1842 "connection status changed" : "PHY RDY changed");
1845 /* okay, let's hand over to EH */
1847 if (irq_stat
& PORT_IRQ_FREEZE
)
1848 ata_port_freeze(ap
);
1849 else if (fbs_need_dec
) {
1850 ata_link_abort(link
);
1851 ahci_fbs_dec_intr(ap
);
1856 static void ahci_qc_complete(struct ata_port
*ap
, void __iomem
*port_mmio
)
1858 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1859 struct ahci_port_priv
*pp
= ap
->private_data
;
1864 * pp->active_link is not reliable once FBS is enabled, both
1865 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
1866 * NCQ and non-NCQ commands may be in flight at the same time.
1868 if (pp
->fbs_enabled
) {
1869 if (ap
->qc_active
) {
1870 qc_active
= readl(port_mmio
+ PORT_SCR_ACT
);
1871 qc_active
|= readl(port_mmio
+ PORT_CMD_ISSUE
);
1874 /* pp->active_link is valid iff any command is in flight */
1875 if (ap
->qc_active
&& pp
->active_link
->sactive
)
1876 qc_active
= readl(port_mmio
+ PORT_SCR_ACT
);
1878 qc_active
= readl(port_mmio
+ PORT_CMD_ISSUE
);
1881 rc
= ata_qc_complete_multiple(ap
, qc_active
);
1882 if (unlikely(rc
< 0 && !(ap
->pflags
& ATA_PFLAG_RESETTING
))) {
1883 ehi
->err_mask
|= AC_ERR_HSM
;
1884 ehi
->action
|= ATA_EH_RESET
;
1885 ata_port_freeze(ap
);
1889 static void ahci_handle_port_interrupt(struct ata_port
*ap
,
1890 void __iomem
*port_mmio
, u32 status
)
1892 struct ahci_port_priv
*pp
= ap
->private_data
;
1893 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
1895 /* ignore BAD_PMP while resetting */
1896 if (unlikely(ap
->pflags
& ATA_PFLAG_RESETTING
))
1897 status
&= ~PORT_IRQ_BAD_PMP
;
1899 if (sata_lpm_ignore_phy_events(&ap
->link
)) {
1900 status
&= ~PORT_IRQ_PHYRDY
;
1901 ahci_scr_write(&ap
->link
, SCR_ERROR
, SERR_PHYRDY_CHG
);
1904 if (unlikely(status
& PORT_IRQ_ERROR
)) {
1906 * Before getting the error notification, we may have
1907 * received SDB FISes notifying successful completions.
1908 * Handle these first and then handle the error.
1910 ahci_qc_complete(ap
, port_mmio
);
1911 ahci_error_intr(ap
, status
);
1915 if (status
& PORT_IRQ_SDB_FIS
) {
1916 /* If SNotification is available, leave notification
1917 * handling to sata_async_notification(). If not,
1918 * emulate it by snooping SDB FIS RX area.
1920 * Snooping FIS RX area is probably cheaper than
1921 * poking SNotification but some constrollers which
1922 * implement SNotification, ICH9 for example, don't
1923 * store AN SDB FIS into receive area.
1925 if (hpriv
->cap
& HOST_CAP_SNTF
)
1926 sata_async_notification(ap
);
1928 /* If the 'N' bit in word 0 of the FIS is set,
1929 * we just received asynchronous notification.
1930 * Tell libata about it.
1932 * Lack of SNotification should not appear in
1933 * ahci 1.2, so the workaround is unnecessary
1934 * when FBS is enabled.
1936 if (pp
->fbs_enabled
)
1939 const __le32
*f
= pp
->rx_fis
+ RX_FIS_SDB
;
1940 u32 f0
= le32_to_cpu(f
[0]);
1942 sata_async_notification(ap
);
1947 /* Handle completed commands */
1948 ahci_qc_complete(ap
, port_mmio
);
1951 static void ahci_port_intr(struct ata_port
*ap
)
1953 void __iomem
*port_mmio
= ahci_port_base(ap
);
1956 status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1957 writel(status
, port_mmio
+ PORT_IRQ_STAT
);
1959 ahci_handle_port_interrupt(ap
, port_mmio
, status
);
1962 static irqreturn_t
ahci_multi_irqs_intr_hard(int irq
, void *dev_instance
)
1964 struct ata_port
*ap
= dev_instance
;
1965 void __iomem
*port_mmio
= ahci_port_base(ap
);
1968 status
= readl(port_mmio
+ PORT_IRQ_STAT
);
1969 writel(status
, port_mmio
+ PORT_IRQ_STAT
);
1971 spin_lock(ap
->lock
);
1972 ahci_handle_port_interrupt(ap
, port_mmio
, status
);
1973 spin_unlock(ap
->lock
);
1978 u32
ahci_handle_port_intr(struct ata_host
*host
, u32 irq_masked
)
1980 unsigned int i
, handled
= 0;
1982 for (i
= 0; i
< host
->n_ports
; i
++) {
1983 struct ata_port
*ap
;
1985 if (!(irq_masked
& (1 << i
)))
1988 ap
= host
->ports
[i
];
1992 if (ata_ratelimit())
1994 "interrupt on disabled port %u\n", i
);
2002 EXPORT_SYMBOL_GPL(ahci_handle_port_intr
);
2004 static irqreturn_t
ahci_single_level_irq_intr(int irq
, void *dev_instance
)
2006 struct ata_host
*host
= dev_instance
;
2007 struct ahci_host_priv
*hpriv
;
2008 unsigned int rc
= 0;
2010 u32 irq_stat
, irq_masked
;
2012 hpriv
= host
->private_data
;
2015 /* sigh. 0xffffffff is a valid return from h/w */
2016 irq_stat
= readl(mmio
+ HOST_IRQ_STAT
);
2020 irq_masked
= irq_stat
& hpriv
->port_map
;
2022 spin_lock(&host
->lock
);
2024 rc
= ahci_handle_port_intr(host
, irq_masked
);
2026 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
2027 * it should be cleared after all the port events are cleared;
2028 * otherwise, it will raise a spurious interrupt after each
2029 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2032 * Also, use the unmasked value to clear interrupt as spurious
2033 * pending event on a dummy port might cause screaming IRQ.
2035 writel(irq_stat
, mmio
+ HOST_IRQ_STAT
);
2037 spin_unlock(&host
->lock
);
2039 return IRQ_RETVAL(rc
);
2042 unsigned int ahci_qc_issue(struct ata_queued_cmd
*qc
)
2044 struct ata_port
*ap
= qc
->ap
;
2045 void __iomem
*port_mmio
= ahci_port_base(ap
);
2046 struct ahci_port_priv
*pp
= ap
->private_data
;
2048 /* Keep track of the currently active link. It will be used
2049 * in completion path to determine whether NCQ phase is in
2052 pp
->active_link
= qc
->dev
->link
;
2054 if (ata_is_ncq(qc
->tf
.protocol
))
2055 writel(1 << qc
->hw_tag
, port_mmio
+ PORT_SCR_ACT
);
2057 if (pp
->fbs_enabled
&& pp
->fbs_last_dev
!= qc
->dev
->link
->pmp
) {
2058 u32 fbs
= readl(port_mmio
+ PORT_FBS
);
2059 fbs
&= ~(PORT_FBS_DEV_MASK
| PORT_FBS_DEC
);
2060 fbs
|= qc
->dev
->link
->pmp
<< PORT_FBS_DEV_OFFSET
;
2061 writel(fbs
, port_mmio
+ PORT_FBS
);
2062 pp
->fbs_last_dev
= qc
->dev
->link
->pmp
;
2065 writel(1 << qc
->hw_tag
, port_mmio
+ PORT_CMD_ISSUE
);
2067 ahci_sw_activity(qc
->dev
->link
);
2071 EXPORT_SYMBOL_GPL(ahci_qc_issue
);
2073 static void ahci_qc_fill_rtf(struct ata_queued_cmd
*qc
)
2075 struct ahci_port_priv
*pp
= qc
->ap
->private_data
;
2076 u8
*rx_fis
= pp
->rx_fis
;
2078 if (pp
->fbs_enabled
)
2079 rx_fis
+= qc
->dev
->link
->pmp
* AHCI_RX_FIS_SZ
;
2082 * After a successful execution of an ATA PIO data-in command,
2083 * the device doesn't send D2H Reg FIS to update the TF and
2084 * the host should take TF and E_Status from the preceding PIO
2087 if (qc
->tf
.protocol
== ATA_PROT_PIO
&& qc
->dma_dir
== DMA_FROM_DEVICE
&&
2088 !(qc
->flags
& ATA_QCFLAG_EH
)) {
2089 ata_tf_from_fis(rx_fis
+ RX_FIS_PIO_SETUP
, &qc
->result_tf
);
2090 qc
->result_tf
.status
= (rx_fis
+ RX_FIS_PIO_SETUP
)[15];
2095 * For NCQ commands, we never get a D2H FIS, so reading the D2H Register
2096 * FIS area of the Received FIS Structure (which contains a copy of the
2097 * last D2H FIS received) will contain an outdated status code.
2098 * For NCQ commands, we instead get a SDB FIS, so read the SDB FIS area
2099 * instead. However, the SDB FIS does not contain the LBA, so we can't
2100 * use the ata_tf_from_fis() helper.
2102 if (ata_is_ncq(qc
->tf
.protocol
)) {
2103 const u8
*fis
= rx_fis
+ RX_FIS_SDB
;
2106 * Successful NCQ commands have been filled already.
2107 * A failed NCQ command will read the status here.
2108 * (Note that a failed NCQ command will get a more specific
2109 * error when reading the NCQ Command Error log.)
2111 qc
->result_tf
.status
= fis
[2];
2112 qc
->result_tf
.error
= fis
[3];
2116 ata_tf_from_fis(rx_fis
+ RX_FIS_D2H_REG
, &qc
->result_tf
);
2119 static void ahci_qc_ncq_fill_rtf(struct ata_port
*ap
, u64 done_mask
)
2121 struct ahci_port_priv
*pp
= ap
->private_data
;
2124 /* No outstanding commands. */
2129 * FBS not enabled, so read status and error once, since they are shared
2132 if (!pp
->fbs_enabled
) {
2135 /* No outstanding NCQ commands. */
2136 if (!pp
->active_link
->sactive
)
2139 fis
= pp
->rx_fis
+ RX_FIS_SDB
;
2144 struct ata_queued_cmd
*qc
;
2145 unsigned int tag
= __ffs64(done_mask
);
2147 qc
= ata_qc_from_tag(ap
, tag
);
2148 if (qc
&& ata_is_ncq(qc
->tf
.protocol
)) {
2149 qc
->result_tf
.status
= status
;
2150 qc
->result_tf
.error
= error
;
2151 qc
->result_tf
.flags
= qc
->tf
.flags
;
2152 qc
->flags
|= ATA_QCFLAG_RTF_FILLED
;
2154 done_mask
&= ~(1ULL << tag
);
2161 * FBS enabled, so read the status and error for each QC, since the QCs
2162 * can belong to different PMP links. (Each PMP link has its own FIS
2166 struct ata_queued_cmd
*qc
;
2167 unsigned int tag
= __ffs64(done_mask
);
2169 qc
= ata_qc_from_tag(ap
, tag
);
2170 if (qc
&& ata_is_ncq(qc
->tf
.protocol
)) {
2172 fis
+= qc
->dev
->link
->pmp
* AHCI_RX_FIS_SZ
;
2174 qc
->result_tf
.status
= fis
[2];
2175 qc
->result_tf
.error
= fis
[3];
2176 qc
->result_tf
.flags
= qc
->tf
.flags
;
2177 qc
->flags
|= ATA_QCFLAG_RTF_FILLED
;
2179 done_mask
&= ~(1ULL << tag
);
2183 static void ahci_freeze(struct ata_port
*ap
)
2185 void __iomem
*port_mmio
= ahci_port_base(ap
);
2188 writel(0, port_mmio
+ PORT_IRQ_MASK
);
2191 static void ahci_thaw(struct ata_port
*ap
)
2193 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2194 void __iomem
*mmio
= hpriv
->mmio
;
2195 void __iomem
*port_mmio
= ahci_port_base(ap
);
2197 struct ahci_port_priv
*pp
= ap
->private_data
;
2200 tmp
= readl(port_mmio
+ PORT_IRQ_STAT
);
2201 writel(tmp
, port_mmio
+ PORT_IRQ_STAT
);
2202 writel(1 << ap
->port_no
, mmio
+ HOST_IRQ_STAT
);
2204 /* turn IRQ back on */
2205 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
2208 void ahci_error_handler(struct ata_port
*ap
)
2210 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2212 if (!ata_port_is_frozen(ap
)) {
2213 /* restart engine */
2214 hpriv
->stop_engine(ap
);
2215 hpriv
->start_engine(ap
);
2218 sata_pmp_error_handler(ap
);
2220 if (!ata_dev_enabled(ap
->link
.device
))
2221 hpriv
->stop_engine(ap
);
2223 EXPORT_SYMBOL_GPL(ahci_error_handler
);
2225 static void ahci_post_internal_cmd(struct ata_queued_cmd
*qc
)
2227 struct ata_port
*ap
= qc
->ap
;
2229 /* make DMA engine forget about the failed command */
2230 if (qc
->flags
& ATA_QCFLAG_EH
)
2231 ahci_kick_engine(ap
);
2234 static void ahci_set_aggressive_devslp(struct ata_port
*ap
, bool sleep
)
2236 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2237 void __iomem
*port_mmio
= ahci_port_base(ap
);
2238 struct ata_device
*dev
= ap
->link
.device
;
2239 u32 devslp
, dm
, dito
, mdat
, deto
, dito_conf
;
2241 unsigned int err_mask
;
2243 devslp
= readl(port_mmio
+ PORT_DEVSLP
);
2244 if (!(devslp
& PORT_DEVSLP_DSP
)) {
2245 dev_info(ap
->host
->dev
, "port does not support device sleep\n");
2249 /* disable device sleep */
2251 if (devslp
& PORT_DEVSLP_ADSE
) {
2252 writel(devslp
& ~PORT_DEVSLP_ADSE
,
2253 port_mmio
+ PORT_DEVSLP
);
2254 err_mask
= ata_dev_set_feature(dev
,
2255 SETFEATURES_SATA_DISABLE
,
2257 if (err_mask
&& err_mask
!= AC_ERR_DEV
)
2258 ata_dev_warn(dev
, "failed to disable DEVSLP\n");
2263 dm
= (devslp
& PORT_DEVSLP_DM_MASK
) >> PORT_DEVSLP_DM_OFFSET
;
2264 dito
= devslp_idle_timeout
/ (dm
+ 1);
2268 dito_conf
= (devslp
>> PORT_DEVSLP_DITO_OFFSET
) & 0x3FF;
2270 /* device sleep was already enabled and same dito */
2271 if ((devslp
& PORT_DEVSLP_ADSE
) && (dito_conf
== dito
))
2274 /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
2275 rc
= hpriv
->stop_engine(ap
);
2279 /* Use the nominal value 10 ms if the read MDAT is zero,
2280 * the nominal value of DETO is 20 ms.
2282 if (dev
->devslp_timing
[ATA_LOG_DEVSLP_VALID
] &
2283 ATA_LOG_DEVSLP_VALID_MASK
) {
2284 mdat
= dev
->devslp_timing
[ATA_LOG_DEVSLP_MDAT
] &
2285 ATA_LOG_DEVSLP_MDAT_MASK
;
2288 deto
= dev
->devslp_timing
[ATA_LOG_DEVSLP_DETO
];
2296 /* Make dito, mdat, deto bits to 0s */
2297 devslp
&= ~GENMASK_ULL(24, 2);
2298 devslp
|= ((dito
<< PORT_DEVSLP_DITO_OFFSET
) |
2299 (mdat
<< PORT_DEVSLP_MDAT_OFFSET
) |
2300 (deto
<< PORT_DEVSLP_DETO_OFFSET
) |
2302 writel(devslp
, port_mmio
+ PORT_DEVSLP
);
2304 hpriv
->start_engine(ap
);
2306 /* enable device sleep feature for the drive */
2307 err_mask
= ata_dev_set_feature(dev
,
2308 SETFEATURES_SATA_ENABLE
,
2310 if (err_mask
&& err_mask
!= AC_ERR_DEV
)
2311 ata_dev_warn(dev
, "failed to enable DEVSLP\n");
2314 static void ahci_enable_fbs(struct ata_port
*ap
)
2316 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2317 struct ahci_port_priv
*pp
= ap
->private_data
;
2318 void __iomem
*port_mmio
= ahci_port_base(ap
);
2322 if (!pp
->fbs_supported
)
2325 fbs
= readl(port_mmio
+ PORT_FBS
);
2326 if (fbs
& PORT_FBS_EN
) {
2327 pp
->fbs_enabled
= true;
2328 pp
->fbs_last_dev
= -1; /* initialization */
2332 rc
= hpriv
->stop_engine(ap
);
2336 writel(fbs
| PORT_FBS_EN
, port_mmio
+ PORT_FBS
);
2337 fbs
= readl(port_mmio
+ PORT_FBS
);
2338 if (fbs
& PORT_FBS_EN
) {
2339 dev_info(ap
->host
->dev
, "FBS is enabled\n");
2340 pp
->fbs_enabled
= true;
2341 pp
->fbs_last_dev
= -1; /* initialization */
2343 dev_err(ap
->host
->dev
, "Failed to enable FBS\n");
2345 hpriv
->start_engine(ap
);
2348 static void ahci_disable_fbs(struct ata_port
*ap
)
2350 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2351 struct ahci_port_priv
*pp
= ap
->private_data
;
2352 void __iomem
*port_mmio
= ahci_port_base(ap
);
2356 if (!pp
->fbs_supported
)
2359 fbs
= readl(port_mmio
+ PORT_FBS
);
2360 if ((fbs
& PORT_FBS_EN
) == 0) {
2361 pp
->fbs_enabled
= false;
2365 rc
= hpriv
->stop_engine(ap
);
2369 writel(fbs
& ~PORT_FBS_EN
, port_mmio
+ PORT_FBS
);
2370 fbs
= readl(port_mmio
+ PORT_FBS
);
2371 if (fbs
& PORT_FBS_EN
)
2372 dev_err(ap
->host
->dev
, "Failed to disable FBS\n");
2374 dev_info(ap
->host
->dev
, "FBS is disabled\n");
2375 pp
->fbs_enabled
= false;
2378 hpriv
->start_engine(ap
);
2381 static void ahci_pmp_attach(struct ata_port
*ap
)
2383 void __iomem
*port_mmio
= ahci_port_base(ap
);
2384 struct ahci_port_priv
*pp
= ap
->private_data
;
2387 cmd
= readl(port_mmio
+ PORT_CMD
);
2388 cmd
|= PORT_CMD_PMP
;
2389 writel(cmd
, port_mmio
+ PORT_CMD
);
2391 ahci_enable_fbs(ap
);
2393 pp
->intr_mask
|= PORT_IRQ_BAD_PMP
;
2396 * We must not change the port interrupt mask register if the
2397 * port is marked frozen, the value in pp->intr_mask will be
2398 * restored later when the port is thawed.
2400 * Note that during initialization, the port is marked as
2401 * frozen since the irq handler is not yet registered.
2403 if (!ata_port_is_frozen(ap
))
2404 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
2407 static void ahci_pmp_detach(struct ata_port
*ap
)
2409 void __iomem
*port_mmio
= ahci_port_base(ap
);
2410 struct ahci_port_priv
*pp
= ap
->private_data
;
2413 ahci_disable_fbs(ap
);
2415 cmd
= readl(port_mmio
+ PORT_CMD
);
2416 cmd
&= ~PORT_CMD_PMP
;
2417 writel(cmd
, port_mmio
+ PORT_CMD
);
2419 pp
->intr_mask
&= ~PORT_IRQ_BAD_PMP
;
2421 /* see comment above in ahci_pmp_attach() */
2422 if (!ata_port_is_frozen(ap
))
2423 writel(pp
->intr_mask
, port_mmio
+ PORT_IRQ_MASK
);
/*
 * ahci_port_resume - power up and (re)start a port
 * @ap: the ATA port to operate on
 *
 * Brings the port back to an operational state and re-applies the
 * PMP attach/detach configuration depending on whether a port
 * multiplier is currently attached.
 *
 * NOTE(review): interior lines were lost in extraction and restored to
 * match the visible control flow — verify against the original file.
 */
int ahci_port_resume(struct ata_port *ap)
{
	ahci_rpm_get_port(ap);

	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);
2443 static void ahci_handle_s2idle(struct ata_port
*ap
)
2445 void __iomem
*port_mmio
= ahci_port_base(ap
);
2448 if (pm_suspend_via_firmware())
2450 devslp
= readl(port_mmio
+ PORT_DEVSLP
);
2451 if ((devslp
& PORT_DEVSLP_ADSE
))
2452 ata_msleep(ap
, devslp_idle_timeout
);
2455 static int ahci_port_suspend(struct ata_port
*ap
, pm_message_t mesg
)
2457 const char *emsg
= NULL
;
2460 rc
= ahci_deinit_port(ap
, &emsg
);
2462 ahci_power_down(ap
);
2464 ata_port_err(ap
, "%s (%d)\n", emsg
, rc
);
2465 ata_port_freeze(ap
);
2468 if (acpi_storage_d3(ap
->host
->dev
))
2469 ahci_handle_s2idle(ap
);
2471 ahci_rpm_put_port(ap
);
2476 static int ahci_port_start(struct ata_port
*ap
)
2478 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2479 struct device
*dev
= ap
->host
->dev
;
2480 struct ahci_port_priv
*pp
;
2483 size_t dma_sz
, rx_fis_sz
;
2485 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
2489 if (ap
->host
->n_ports
> 1) {
2490 pp
->irq_desc
= devm_kzalloc(dev
, 8, GFP_KERNEL
);
2491 if (!pp
->irq_desc
) {
2492 devm_kfree(dev
, pp
);
2495 snprintf(pp
->irq_desc
, 8,
2496 "%s%d", dev_driver_string(dev
), ap
->port_no
);
2499 /* check FBS capability */
2500 if ((hpriv
->cap
& HOST_CAP_FBS
) && sata_pmp_supported(ap
)) {
2501 void __iomem
*port_mmio
= ahci_port_base(ap
);
2502 u32 cmd
= readl(port_mmio
+ PORT_CMD
);
2503 if (cmd
& PORT_CMD_FBSCP
)
2504 pp
->fbs_supported
= true;
2505 else if (hpriv
->flags
& AHCI_HFLAG_YES_FBS
) {
2506 dev_info(dev
, "port %d can do FBS, forcing FBSCP\n",
2508 pp
->fbs_supported
= true;
2510 dev_warn(dev
, "port %d is not capable of FBS\n",
2514 if (pp
->fbs_supported
) {
2515 dma_sz
= AHCI_PORT_PRIV_FBS_DMA_SZ
;
2516 rx_fis_sz
= AHCI_RX_FIS_SZ
* 16;
2518 dma_sz
= AHCI_PORT_PRIV_DMA_SZ
;
2519 rx_fis_sz
= AHCI_RX_FIS_SZ
;
2522 mem
= dmam_alloc_coherent(dev
, dma_sz
, &mem_dma
, GFP_KERNEL
);
2527 * First item in chunk of DMA memory: 32-slot command table,
2528 * 32 bytes each in size
2531 pp
->cmd_slot_dma
= mem_dma
;
2533 mem
+= AHCI_CMD_SLOT_SZ
;
2534 mem_dma
+= AHCI_CMD_SLOT_SZ
;
2537 * Second item: Received-FIS area
2540 pp
->rx_fis_dma
= mem_dma
;
2543 mem_dma
+= rx_fis_sz
;
2546 * Third item: data area for storing a single command
2547 * and its scatter-gather table
2550 pp
->cmd_tbl_dma
= mem_dma
;
2553 * Save off initial list of interrupts to be enabled.
2554 * This could be changed later
2556 pp
->intr_mask
= DEF_PORT_IRQ
;
2559 * Switch to per-port locking in case each port has its own MSI vector.
2561 if (hpriv
->flags
& AHCI_HFLAG_MULTI_MSI
) {
2562 spin_lock_init(&pp
->lock
);
2563 ap
->lock
= &pp
->lock
;
2566 ap
->private_data
= pp
;
2568 /* engage engines, captain */
2569 return ahci_port_resume(ap
);
2572 static void ahci_port_stop(struct ata_port
*ap
)
2574 const char *emsg
= NULL
;
2575 struct ahci_host_priv
*hpriv
= ap
->host
->private_data
;
2576 void __iomem
*host_mmio
= hpriv
->mmio
;
2579 /* de-initialize port */
2580 rc
= ahci_deinit_port(ap
, &emsg
);
2582 ata_port_warn(ap
, "%s (%d)\n", emsg
, rc
);
2585 * Clear GHC.IS to prevent stuck INTx after disabling MSI and
2588 writel(1 << ap
->port_no
, host_mmio
+ HOST_IRQ_STAT
);
2590 ahci_rpm_put_port(ap
);
2593 void ahci_print_info(struct ata_host
*host
, const char *scc_s
)
2595 struct ahci_host_priv
*hpriv
= host
->private_data
;
2596 u32 vers
, cap
, cap2
, impl
, speed
;
2597 const char *speed_s
;
2599 vers
= hpriv
->version
;
2602 impl
= hpriv
->port_map
;
2604 speed
= (cap
>> 20) & 0xf;
2607 else if (speed
== 2)
2609 else if (speed
== 3)
2615 "AHCI vers %02x%02x.%02x%02x, "
2616 "%u command slots, %s Gbps, %s mode\n"
2619 (vers
>> 24) & 0xff,
2620 (vers
>> 16) & 0xff,
2624 ((cap
>> 8) & 0x1f) + 1,
2629 "%u/%u ports implemented (port mask 0x%x)\n"
2644 cap
& HOST_CAP_64
? "64bit " : "",
2645 cap
& HOST_CAP_NCQ
? "ncq " : "",
2646 cap
& HOST_CAP_SNTF
? "sntf " : "",
2647 cap
& HOST_CAP_MPS
? "ilck " : "",
2648 cap
& HOST_CAP_SSS
? "stag " : "",
2649 cap
& HOST_CAP_ALPM
? "pm " : "",
2650 cap
& HOST_CAP_LED
? "led " : "",
2651 cap
& HOST_CAP_CLO
? "clo " : "",
2652 cap
& HOST_CAP_ONLY
? "only " : "",
2653 cap
& HOST_CAP_PMP
? "pmp " : "",
2654 cap
& HOST_CAP_FBS
? "fbs " : "",
2655 cap
& HOST_CAP_PIO_MULTI
? "pio " : "",
2656 cap
& HOST_CAP_SSC
? "slum " : "",
2657 cap
& HOST_CAP_PART
? "part " : "",
2658 cap
& HOST_CAP_CCC
? "ccc " : "",
2659 cap
& HOST_CAP_EMS
? "ems " : "",
2660 cap
& HOST_CAP_SXS
? "sxs " : "",
2661 cap2
& HOST_CAP2_DESO
? "deso " : "",
2662 cap2
& HOST_CAP2_SADM
? "sadm " : "",
2663 cap2
& HOST_CAP2_SDS
? "sds " : "",
2664 cap2
& HOST_CAP2_APST
? "apst " : "",
2665 cap2
& HOST_CAP2_NVMHCI
? "nvmp " : "",
2666 cap2
& HOST_CAP2_BOH
? "boh " : ""
2669 EXPORT_SYMBOL_GPL(ahci_print_info
);
2671 void ahci_set_em_messages(struct ahci_host_priv
*hpriv
,
2672 struct ata_port_info
*pi
)
2675 void __iomem
*mmio
= hpriv
->mmio
;
2676 u32 em_loc
= readl(mmio
+ HOST_EM_LOC
);
2677 u32 em_ctl
= readl(mmio
+ HOST_EM_CTL
);
2679 if (!ahci_em_messages
|| !(hpriv
->cap
& HOST_CAP_EMS
))
2682 messages
= (em_ctl
& EM_CTRL_MSG_TYPE
) >> 16;
2686 hpriv
->em_loc
= ((em_loc
>> 16) * 4);
2687 hpriv
->em_buf_sz
= ((em_loc
& 0xff) * 4);
2688 hpriv
->em_msg_type
= messages
;
2689 pi
->flags
|= ATA_FLAG_EM
;
2690 if (!(em_ctl
& EM_CTL_ALHD
))
2691 pi
->flags
|= ATA_FLAG_SW_ACTIVITY
;
2694 EXPORT_SYMBOL_GPL(ahci_set_em_messages
);
2696 static int ahci_host_activate_multi_irqs(struct ata_host
*host
,
2697 const struct scsi_host_template
*sht
)
2699 struct ahci_host_priv
*hpriv
= host
->private_data
;
2702 rc
= ata_host_start(host
);
2706 * Requests IRQs according to AHCI-1.1 when multiple MSIs were
2707 * allocated. That is one MSI per port, starting from @irq.
2709 for (i
= 0; i
< host
->n_ports
; i
++) {
2710 struct ahci_port_priv
*pp
= host
->ports
[i
]->private_data
;
2711 int irq
= hpriv
->get_irq_vector(host
, i
);
2713 /* Do not receive interrupts sent by dummy ports */
2719 rc
= devm_request_irq(host
->dev
, irq
, ahci_multi_irqs_intr_hard
,
2720 0, pp
->irq_desc
, host
->ports
[i
]);
2724 ata_port_desc_misc(host
->ports
[i
], irq
);
2727 return ata_host_register(host
, sht
);
2731 * ahci_host_activate - start AHCI host, request IRQs and register it
2732 * @host: target ATA host
2733 * @sht: scsi_host_template to use when registering the host
2736 * Inherited from calling layer (may sleep).
2739 * 0 on success, -errno otherwise.
2741 int ahci_host_activate(struct ata_host
*host
, const struct scsi_host_template
*sht
)
2743 struct ahci_host_priv
*hpriv
= host
->private_data
;
2744 int irq
= hpriv
->irq
;
2747 if (hpriv
->flags
& AHCI_HFLAG_MULTI_MSI
) {
2748 if (hpriv
->irq_handler
&&
2749 hpriv
->irq_handler
!= ahci_single_level_irq_intr
)
2751 "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
2752 if (!hpriv
->get_irq_vector
) {
2754 "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
2758 rc
= ahci_host_activate_multi_irqs(host
, sht
);
2760 rc
= ata_host_activate(host
, irq
, hpriv
->irq_handler
,
2767 EXPORT_SYMBOL_GPL(ahci_host_activate
);
2769 MODULE_AUTHOR("Jeff Garzik");
2770 MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2771 MODULE_LICENSE("GPL");