/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 *
 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include "ahci.h"

#define CPHY_MAP(dev, addr)		((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr)			(((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL			0x80a0
#define SERDES_CR_ADDR			0x80a1
#define SERDES_CR_DATA			0x80a2
#define CR_BUSY				0x0001
#define CR_START			0x0001
#define CR_WR_RDN			0x0002
#define CPHY_TX_INPUT_STS		0x2001
#define CPHY_RX_INPUT_STS		0x2002
#define CPHY_SATA_TX_OVERRIDE		0x8000
#define CPHY_SATA_RX_OVERRIDE		0x4000
#define CPHY_TX_OVERRIDE		0x2004
#define CPHY_RX_OVERRIDE		0x2005
#define SPHY_LANE			0x100
#define SPHY_HALF_RATE			0x0001
#define CPHY_SATA_DPLL_MODE		0x0700
#define CPHY_SATA_DPLL_SHIFT		8
#define CPHY_SATA_DPLL_RESET		(1 << 11)
#define CPHY_SATA_TX_ATTEN		0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT			6
#define CPHY_LANE_COUNT			4
#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)

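/*
 * Combo PHY registers are reached through a small window: the map register
 * at offset 0x800 of the PHY's MMIO range selects the PHY device and the
 * upper bits of the register address (CPHY_MAP), while the low 9 address
 * bits pick a 32-bit word inside the window (CPHY_ADDR turns them into a
 * byte offset).  See __combo_phy_reg_read()/__combo_phy_reg_write() below.
 */
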
static DEFINE_SPINLOCK(cphy_lock);

/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];

static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK				0
#define SLOAD				1
#define SDATA				2
#define SGPIO_PINS			3
#define SGPIO_PORTS			8

struct ecx_plat_data {
	u32		n_ports;
	/* number of extra clocks that the SGPIO PIC controller expects */
	u32		pre_clocks;
	u32		post_clocks;
	unsigned	sgpio_gpio[SGPIO_PINS];
	u32		sgpio_pattern;
	u32		port_to_sgpio[SGPIO_PORTS];
};

#define SGPIO_SIGNALS			3
#define ECX_ACTIVITY_BITS		0x300000
#define ECX_ACTIVITY_SHIFT		0
#define ECX_LOCATE_BITS			0x80000
#define ECX_LOCATE_SHIFT		1
#define ECX_FAULT_BITS			0x400000
#define ECX_FAULT_SHIFT			2

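/*
 * Each port contributes three bits (activity, locate, fault) to the SGPIO
 * pattern; sgpio_bit_shift() returns the bit for a given port and signal,
 * using the DT-provided port_to_sgpio[] mapping to order ports on the wire.
 */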
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}

/* Translate the LED enclosure-management bits for this port into our
 * SGPIO pattern.
 */
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}

/*
 * Tell the LED controller that the signal has changed by raising the clock
 * line for 50 uS and then lowering it for 50 uS.
 */
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
	udelay(50);
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
	udelay(50);
}

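/*
 * ecx_transmit_led_message() shifts the whole pattern out over the SGPIO
 * bus: optional pre-clocks, a one-clock SLOAD pulse to mark the start of a
 * frame, then SGPIO_SIGNALS bits per port on SDATA (LSB first), and finally
 * any post-clocks the LED controller expects.
 */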
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);

	/*
	 * bit-bang out the SGPIO pattern, by consuming a bit and then
	 * clocking it out.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);

	return size;
}

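/*
 * Claim the clock/load/data SGPIO lines named in the "calxeda,sgpio-gpio"
 * DT property, read the LED ordering and pre/post clock counts from the
 * node, and advertise LED-type enclosure management on the port.
 */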
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;
	int err;

	for (i = 0; i < SGPIO_PINS; i++) {
		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
		if (IS_ERR_VALUE(err))
			return;

		pdata->sgpio_gpio[i] = err;
		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
		if (err) {
			pr_err("sata_highbank gpio_request %d failed: %d\n",
					i, err);
			return;
		}
		gpio_direction_output(pdata->sgpio_gpio[i], 1);
	}
	of_property_read_u32_array(np, "calxeda,led-order",
				pdata->port_to_sgpio,
				pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				&pdata->post_clocks))
		pdata->post_clocks = 0;

	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}

static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}

static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}

static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

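/*
 * SERDES control-register access sequence: wait for CR_BUSY to clear, load
 * the target register address into SERDES_CR_ADDR (and the value into
 * SERDES_CR_DATA for a write), then kick the transfer via SERDES_CR_CTL
 * (CR_START for a read, CR_WR_RDN | CR_START for a write).  Reads wait
 * again and pull the result out of SERDES_CR_DATA.
 */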
static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}

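/*
 * Each lane of a combo PHY has its own copy of the override registers at
 * lane * SPHY_LANE; the CPHY_SATA_RX_OVERRIDE/CPHY_SATA_TX_OVERRIDE bits
 * gate whether the override values are actually applied to the lane.
 */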
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}

/* Enable the RX override, program the DPLL mode and pulse the DPLL reset. */
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}

static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
					lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}

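/*
 * highbank_initialize_phys() walks the "calxeda,port-phys" phandle list to
 * find, for each SATA port, the combo PHY node and lane it is wired to,
 * maps each PHY once with of_iomap(), reads the PHY's "phydev" id, and
 * finally applies the optional per-port "calxeda,tx-atten" values.  An
 * illustrative node (phandle names and values are made up, not copied from
 * a real board dts):
 *
 *	sata@ffe08000 {
 *		compatible = "calxeda,hb-ahci";
 *		calxeda,port-phys = <&combophy0 2>, <&combophy0 3>;
 *		calxeda,tx-atten = <3 3>;
 *		calxeda,sgpio-gpio = <&gpioh 5 1>, <&gpioh 6 1>, <&gpioh 7 1>;
 *		calxeda,led-order = <0 1>;
 *	};
 */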
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL)
				return 0;
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}

/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3
 * drives.  Retrying the phy hard reset can work around the issue, but the
 * drive may fail again.  In less than 150 out of 15000 test runs, it took
 * more than 10 tries for the link to be established (but never more than 35).
 * Triple the maximum observed retry count to provide plenty of margin for
 * rare events and to guarantee that the link is established.
 *
 * Also, the default 2 second time-out on a failed drive is too long in
 * this situation.  The uboot implementation of the same driver function
 * uses a much shorter time-out period and never experiences a time out
 * issue.  Reducing the time-out to 500ms improves the responsiveness.
 * The other timing constants were kept the same as the stock AHCI driver.
 * This change was also tested 15000 times on 24 drives and none of them
 * experienced a time out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If the status is 1, we are connected, but the link did not
		 * come up. So retry resetting the link again.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);

static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv);

	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ahci_host_activate(host, irq, &ahci_highbank_platform_sht);
	if (rc)
		return rc;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);

	return rc;
}

static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);

MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");