drivers/ata/sata_highbank.c
/*
 * Calxeda Highbank AHCI SATA platform driver
 * Copyright 2012 Calxeda, Inc.
 *
 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

#include "ahci.h"
#define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
#define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
#define SERDES_CR_CTL			0x80a0
#define SERDES_CR_ADDR			0x80a1
#define SERDES_CR_DATA			0x80a2
#define CR_BUSY				0x0001
#define CR_START			0x0001
#define CR_WR_RDN			0x0002
#define CPHY_TX_INPUT_STS		0x2001
#define CPHY_RX_INPUT_STS		0x2002
#define CPHY_SATA_TX_OVERRIDE		0x8000
#define CPHY_SATA_RX_OVERRIDE		0x4000
#define CPHY_TX_OVERRIDE		0x2004
#define CPHY_RX_OVERRIDE		0x2005
#define SPHY_LANE			0x100
#define SPHY_HALF_RATE			0x0001
#define CPHY_SATA_DPLL_MODE		0x0700
#define CPHY_SATA_DPLL_SHIFT		8
#define CPHY_SATA_DPLL_RESET		(1 << 11)
#define CPHY_SATA_TX_ATTEN		0x1c00
#define CPHY_SATA_TX_ATTEN_SHIFT	10
#define CPHY_PHY_COUNT			6
#define CPHY_LANE_COUNT			4
#define CPHY_PORT_COUNT			(CPHY_PHY_COUNT * CPHY_LANE_COUNT)
static DEFINE_SPINLOCK(cphy_lock);

/* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
 * sata ports to their phys and then to their lanes within the phys
 */
struct phy_lane_info {
	void __iomem *phy_base;
	u8 lane_mapping;
	u8 phy_devs;
	u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];
static DEFINE_SPINLOCK(sgpio_lock);
#define SCLOCK				0
#define SLOAD				1
#define SDATA				2
#define SGPIO_PINS			3
#define SGPIO_PORTS			8

struct ecx_plat_data {
	u32		n_ports;
	/* number of extra clocks that the SGPIO PIC controller expects */
	u32		pre_clocks;
	u32		post_clocks;
	unsigned	sgpio_gpio[SGPIO_PINS];
	u32		sgpio_pattern;
	u32		port_to_sgpio[SGPIO_PORTS];
};
#define SGPIO_SIGNALS			3
#define ECX_ACTIVITY_BITS		0x300000
#define ECX_ACTIVITY_SHIFT		0
#define ECX_LOCATE_BITS			0x80000
#define ECX_LOCATE_SHIFT		1
#define ECX_FAULT_BITS			0x400000
#define ECX_FAULT_SHIFT			2
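
/*
 * Each port contributes three consecutive bits (activity, locate, fault) to
 * the serial SGPIO pattern; the port's position in the stream comes from the
 * "calxeda,led-order" mapping. Despite the name, this returns a one-bit mask.
 */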
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
				u32 shift)
{
	return 1 << (3 * pdata->port_to_sgpio[port] + shift);
}
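
/*
 * Fold the LED state from an enclosure management message into the cached
 * SGPIO pattern for one port, setting or clearing its activity, locate and
 * fault bits.
 */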
static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
{
	if (state & ECX_ACTIVITY_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_ACTIVITY_SHIFT);
	if (state & ECX_LOCATE_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_LOCATE_SHIFT);
	if (state & ECX_FAULT_BITS)
		pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
	else
		pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
						ECX_FAULT_SHIFT);
}
/*
 * Tell the LED controller that the signal has changed by raising the clock
 * line for 50 uS and then lowering it for 50 uS.
 */
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
	udelay(50);
	gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
	udelay(50);
}
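
/*
 * libata ->transmit_led_message hook: update the cached SGPIO pattern for
 * this port and bit-bang the whole pattern out over the clock/load/data GPIO
 * lines, framed by the configured pre/post clock cycles, under sgpio_lock.
 */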
static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	unsigned long flags;
	int pmp, i;
	struct ahci_em_priv *emp;
	u32 sgpio_out;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
		return size;

	spin_lock_irqsave(&sgpio_lock, flags);
	ecx_parse_sgpio(pdata, ap->port_no, state);
	sgpio_out = pdata->sgpio_pattern;
	for (i = 0; i < pdata->pre_clocks; i++)
		ecx_led_cycle_clock(pdata);

	gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
	ecx_led_cycle_clock(pdata);
	gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);

	/*
	 * bit-bang out the SGPIO pattern, by consuming a bit and then
	 * clocking it out.
	 */
	for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
		gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
		sgpio_out >>= 1;
		ecx_led_cycle_clock(pdata);
	}
	for (i = 0; i < pdata->post_clocks; i++)
		ecx_led_cycle_clock(pdata);

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(&sgpio_lock, flags);
	return size;
}
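
/*
 * Read the SGPIO wiring from the device tree ("calxeda,sgpio-gpio",
 * "calxeda,led-order", "calxeda,pre-clocks", "calxeda,post-clocks"), claim
 * the three GPIO lines and advertise LED-based enclosure management.
 */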
static void highbank_set_em_messages(struct device *dev,
					struct ahci_host_priv *hpriv,
					struct ata_port_info *pi)
{
	struct device_node *np = dev->of_node;
	struct ecx_plat_data *pdata = hpriv->plat_data;
	int i;
	int err;

	for (i = 0; i < SGPIO_PINS; i++) {
		err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
		if (IS_ERR_VALUE(err))
			return;
		pdata->sgpio_gpio[i] = err;
		err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
		if (err) {
			pr_err("sata_highbank gpio_request %d failed: %d\n",
					i, err);
			return;
		}
		gpio_direction_output(pdata->sgpio_gpio[i], 1);
	}
	of_property_read_u32_array(np, "calxeda,led-order",
				pdata->port_to_sgpio,
				pdata->n_ports);
	if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
		pdata->pre_clocks = 0;
	if (of_property_read_u32(np, "calxeda,post-clocks",
				&pdata->post_clocks))
		pdata->post_clocks = 0;

	/* store em_loc */
	hpriv->em_loc = 0;
	hpriv->em_buf_sz = 4;
	hpriv->em_msg_type = EM_MSG_TYPE_LED;
	pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
}
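
/*
 * Combo PHY registers are reached indirectly: the device number and upper
 * address bits are written to the map register at offset 0x800, then the
 * register is accessed through its low address bits within the mapped
 * window. cphy_lock serializes the map/access pair.
 */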
static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
{
	u32 data;
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
	return data;
}
static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
{
	u8 dev = port_data[sata_port].phy_devs;
	spin_lock(&cphy_lock);
	writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
	writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
	spin_unlock(&cphy_lock);
}
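
/*
 * The SerDes "CR" registers sit behind a second level of indirection: write
 * the target address to SERDES_CR_ADDR, start the transaction via
 * SERDES_CR_CTL (CR_START, plus CR_WR_RDN for writes) and poll CR_BUSY
 * until it clears.
 */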
static void combo_phy_wait_for_ready(u8 sata_port)
{
	while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
		udelay(5);
}

static u32 combo_phy_read(u8 sata_port, u32 addr)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
	combo_phy_wait_for_ready(sata_port);
	return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
}

static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
{
	combo_phy_wait_for_ready(sata_port);
	__combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
	__combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
	__combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
}
static void highbank_cphy_disable_overrides(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
}
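
/*
 * Force the lane's transmit attenuation through the TX override register.
 * Values with bit 3 set are treated as "leave the hardware default alone".
 */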
static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;

	if (val & 0x8)
		return;

	tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_TX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
	combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}
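
/*
 * Program the lane's receive DPLL mode through the RX override register and
 * pulse CPHY_SATA_DPLL_RESET so the new mode takes effect.
 */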
static void cphy_override_rx_mode(u8 sata_port, u32 val)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp;
	tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
	tmp &= ~CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_RX_OVERRIDE;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_MODE;
	tmp |= val << CPHY_SATA_DPLL_SHIFT;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp |= CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	tmp &= ~CPHY_SATA_DPLL_RESET;
	combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);

	msleep(15);
}
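
/*
 * Re-apply the RX DPLL mode and TX attenuation overrides for a port's lane
 * once it has (with a bounded wait) left half-rate mode. Paired with
 * highbank_cphy_disable_overrides() around each hard reset.
 */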
static void highbank_cphy_override_lane(u8 sata_port)
{
	u8 lane = port_data[sata_port].lane_mapping;
	u32 tmp, k = 0;

	if (unlikely(port_data[sata_port].phy_base == NULL))
		return;
	do {
		tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
						lane * SPHY_LANE);
	} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
	cphy_override_rx_mode(sata_port, 3);
	cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}
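
/*
 * Walk the "calxeda,port-phys" phandles to record each SATA port's combo PHY
 * mmio base, lane and PHY device number in port_data[], then pick up the
 * optional per-port "calxeda,tx-atten" values.
 */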
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
	struct device_node *sata_node = dev->of_node;
	int phy_count = 0, phy, port = 0, i;
	void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
	struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
	u32 tx_atten[CPHY_PORT_COUNT] = {};

	memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);

	do {
		u32 tmp;
		struct of_phandle_args phy_data;
		if (of_parse_phandle_with_args(sata_node,
				"calxeda,port-phys", "#phy-cells",
				port, &phy_data))
			break;
		for (phy = 0; phy < phy_count; phy++) {
			if (phy_nodes[phy] == phy_data.np)
				break;
		}
		if (phy_nodes[phy] == NULL) {
			phy_nodes[phy] = phy_data.np;
			cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
			if (cphy_base[phy] == NULL) {
				return 0;
			}
			phy_count += 1;
		}
		port_data[port].lane_mapping = phy_data.args[0];
		of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
		port_data[port].phy_devs = tmp;
		port_data[port].phy_base = cphy_base[phy];
		of_node_put(phy_data.np);
		port += 1;
	} while (port < CPHY_PORT_COUNT);
	of_property_read_u32_array(sata_node, "calxeda,tx-atten",
				tx_atten, port);
	for (i = 0; i < port; i++)
		port_data[i].tx_atten = (u8) tx_atten[i];
	return 0;
}
/*
 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3.
 * Retrying the phy hard reset can work around the issue, but the drive
 * may fail again. In less than 150 out of 15000 test runs, it took more
 * than 10 tries for the link to be established (but never more than 35).
 * Triple the maximum observed retry count to provide plenty of margin for
 * rare events and to guarantee that the link is established.
 *
 * Also, the default 2 second time-out on a failed drive is too long in
 * this situation. The uboot implementation of the same driver function
 * uses a much shorter time-out period and never experiences a time out
 * issue. Reducing the time-out to 500ms improves the responsiveness.
 * The other timing constants were kept the same as the stock AHCI driver.
 * This change was also tested 15000 times on 24 drives and none of them
 * experienced a time out.
 */
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	static const unsigned long timing[] = { 5, 100, 500};
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	u32 sstatus;
	int rc;
	int retry = 100;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	do {
		highbank_cphy_disable_overrides(link->ap->port_no);
		rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
		highbank_cphy_override_lane(link->ap->port_no);

		/* If the status is 1, we are connected, but the link did not
		 * come up. So retry resetting the link again.
		 */
		if (sata_scr_read(link, SCR_STATUS, &sstatus))
			break;
		if (!(sstatus & 0x3))
			break;
	} while (!online && retry--);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}
static struct ata_port_operations ahci_highbank_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_highbank_hardreset,
	.transmit_led_message	= ecx_transmit_led_message,
};

static const struct ata_port_info ahci_highbank_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_highbank_ops,
};

static struct scsi_host_template ahci_highbank_platform_sht = {
	AHCI_SHT("sata_highbank"),
};

static const struct of_device_id ahci_of_match[] = {
	{ .compatible = "calxeda,hb-ahci" },
	{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
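
/*
 * Standard AHCI platform bring-up: map the controller registers, initialize
 * the combo PHYs, read the HBA capabilities, wire up enclosure-management
 * messaging, then allocate and activate the ATA host.
 */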
static int ahci_highbank_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ecx_plat_data *pdata;
	struct ata_host *host;
	struct resource *mem;
	int irq;
	int i;
	int rc;
	u32 n_ports;
	struct ata_port_info pi = ahci_highbank_port_info;
	const struct ata_port_info *ppi[] = { &pi, NULL };

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "no mmio space\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "no irq\n");
		return -EINVAL;
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		dev_err(dev, "can't alloc ahci_host_priv\n");
		return -ENOMEM;
	}

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "can't alloc ecx_plat_data\n");
		return -ENOMEM;
	}

	hpriv->flags |= (unsigned long)pi.private_data;

	hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
	if (!hpriv->mmio) {
		dev_err(dev, "can't map %pR\n", mem);
		return -ENOMEM;
	}

	rc = highbank_initialize_phys(dev, hpriv->mmio);
	if (rc)
		return rc;

	ahci_save_initial_config(dev, hpriv, 0, 0);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (hpriv->cap & HOST_CAP_64)
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	pdata->n_ports = n_ports;
	hpriv->plat_data = pdata;
	highbank_set_em_messages(dev, hpriv, &pi);

	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
	if (!host) {
		rc = -ENOMEM;
		goto err0;
	}

	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_desc(ap, "mmio %pR", mem);
		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = hpriv->em_msg_type;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	rc = ahci_reset_controller(host);
	if (rc)
		goto err0;

	ahci_init_controller(host);
	ahci_print_info(host, "platform");

	rc = ata_host_activate(host, irq, ahci_interrupt, 0,
					&ahci_highbank_platform_sht);
	if (rc)
		goto err0;

	return 0;
err0:
	return rc;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_highbank_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(dev, "firmware update required for suspend/resume\n");
		return -EIO;
	}

	/*
	 * AHCI spec rev1.1 section 8.3.3:
	 * Software must disable interrupts prior to requesting a
	 * transition of the HBA to D3 state.
	 */
	ctl = readl(mmio + HOST_CTL);
	ctl &= ~HOST_IRQ_EN;
	writel(ctl, mmio + HOST_CTL);
	readl(mmio + HOST_CTL); /* flush */

	rc = ata_host_suspend(host, PMSG_SUSPEND);
	if (rc)
		return rc;

	return 0;
}
static int ahci_highbank_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	int rc;

	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
			  ahci_highbank_suspend, ahci_highbank_resume);

static struct platform_driver ahci_highbank_driver = {
	.remove = ata_platform_remove_one,
	.driver = {
		.name = "highbank-ahci",
		.owner = THIS_MODULE,
		.of_match_table = ahci_of_match,
		.pm = &ahci_highbank_pm_ops,
	},
	.probe = ahci_highbank_probe,
};

module_platform_driver(ahci_highbank_driver);
MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("sata:highbank");