// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_promise.c - Promise SATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware information only available under NDA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "sata_promise.h"

#define DRV_NAME	"sata_promise"
#define DRV_VERSION	"2.12"
enum {
	PDC_MAX_PRD		= LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */

	/* host register offsets (from host->iomap[PDC_MMIO_BAR]) */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_FLASH_CTL		= 0x44,	/* Flash control register */
	PDC_PCI_CTL		= 0x48,	/* PCI control/status reg */
	PDC_SATA_PLUG_CSR	= 0x6C,	/* SATA Plug control/status reg */
	PDC2_SATA_PLUG_CSR	= 0x60,	/* SATAII Plug control/status reg */
	PDC_TBG_MODE		= 0x41C, /* TBG mode (not SATAII) */
	PDC_SLEW_CTL		= 0x470, /* slew rate control reg (not SATAII) */

	/* per-port ATA register offsets (from ap->ioaddr.cmd_addr) */
	PDC_FEATURE		= 0x04,	/* Feature/Error reg (per port) */
	PDC_SECTOR_COUNT	= 0x08,	/* Sector count reg (per port) */
	PDC_SECTOR_NUMBER	= 0x0C,	/* Sector number reg (per port) */
	PDC_CYLINDER_LOW	= 0x10,	/* Cylinder low reg (per port) */
	PDC_CYLINDER_HIGH	= 0x14,	/* Cylinder high reg (per port) */
	PDC_DEVICE		= 0x18,	/* Device/Head reg (per port) */
	PDC_COMMAND		= 0x1C,	/* Command/status reg (per port) */
	PDC_ALTSTATUS		= 0x38,	/* Alternate-status/device-control reg (per port) */
	PDC_PKT_SUBMIT		= 0x40,	/* Command packet pointer addr */
	PDC_GLOBAL_CTL		= 0x48,	/* Global control/status (per port) */
	PDC_CTLSTAT		= 0x60,	/* IDE control and status (per port) */

	/* per-port SATA register offsets (from ap->ioaddr.scr_addr) */
	PDC_SATA_ERROR		= 0x04,
	PDC_LINK_LAYER_ERRORS	= 0x6C,
	PDC_FPDMA_CTLSTAT	= 0xD8,
	PDC_INTERNAL_DEBUG_1	= 0xF8,	/* also used for PATA */
	PDC_INTERNAL_DEBUG_2	= 0xFC,	/* also used for PATA */

	/* PDC_FPDMA_CTLSTAT bit definitions */
	PDC_FPDMA_CTLSTAT_RESET			= 1 << 3,
	PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG	= 1 << 10,
	PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG	= 1 << 11,

	/* PDC_GLOBAL_CTL bit definitions */
	PDC_PH_ERR		= (1 <<  8), /* PCI error while loading packet */
	PDC_SH_ERR		= (1 <<  9), /* PCI error while loading S/G table */
	PDC_DH_ERR		= (1 << 10), /* PCI error while loading data */
	PDC2_HTO_ERR		= (1 << 12), /* host bus timeout */
	PDC2_ATA_HBA_ERR	= (1 << 13), /* error during SATA DATA FIS transmission */
	PDC2_ATA_DMA_CNT_ERR	= (1 << 14), /* DMA DATA FIS size differs from S/G count */
	PDC_OVERRUN_ERR		= (1 << 19), /* S/G byte count larger than HD requires */
	PDC_UNDERRUN_ERR	= (1 << 20), /* S/G byte count less than HD requires */
	PDC_DRIVE_ERR		= (1 << 21), /* drive error */
	PDC_PCI_SYS_ERR		= (1 << 22), /* PCI system error */
	PDC1_PCI_PARITY_ERR	= (1 << 23), /* PCI parity error (from SATA150 driver) */
	PDC1_ERR_MASK		= PDC1_PCI_PARITY_ERR,
	PDC2_ERR_MASK		= PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
				  PDC2_ATA_DMA_CNT_ERR,
	PDC_ERR_MASK		= PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
				  PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
				  PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
				  PDC1_ERR_MASK | PDC2_ERR_MASK,

	board_2037x		= 0,	/* FastTrak S150 TX2plus */
	board_2037x_pata	= 1,	/* FastTrak S150 TX2plus PATA port */
	board_20319		= 2,	/* FastTrak S150 TX4 */
	board_20619		= 3,	/* FastTrak TX4000 */
	board_2057x		= 4,	/* SATAII150 Tx2plus */
	board_2057x_pata	= 5,	/* SATAII150 Tx2plus PATA port */
	board_40518		= 6,	/* SATAII150 Tx4 */

	PDC_HAS_PATA		= (1 << 1), /* PDC20375/20575 has PATA */

	/* Sequence counter control registers bit definitions */
	PDC_SEQCNTRL_INT_MASK	= (1 << 5), /* Sequence Interrupt Mask */

	/* Feature register values */
	PDC_FEATURE_ATAPI_PIO	= 0x00, /* ATAPI data xfer by PIO */
	PDC_FEATURE_ATAPI_DMA	= 0x01, /* ATAPI data xfer by DMA */

	/* Device/Head register values */
	PDC_DEVICE_SATA		= 0xE0, /* Device/Head value for SATA devices */

	/* PDC_CTLSTAT bit definitions */
	PDC_DMA_ENABLE		= (1 << 7),
	PDC_IRQ_DISABLE		= (1 << 10),
	PDC_RESET		= (1 << 11), /* HDMA reset */

	PDC_COMMON_FLAGS	= ATA_FLAG_PIO_POLLING,

	PDC_FLAG_GEN_II		= (1 << 24),
	PDC_FLAG_SATA_PATA	= (1 << 25), /* supports SATA + PATA */
	PDC_FLAG_4_PORTS	= (1 << 26), /* 4 ports */
};
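/* PDC_ERR_MASK above is the union of the gen I and gen II error bits;
 * pdc_host_intr() masks out PDC1_ERR_MASK or PDC2_ERR_MASK depending on the
 * chip generation before testing PDC_GLOBAL_CTL.
 */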
struct pdc_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
};

struct pdc_host_priv {
	spinlock_t hard_reset_lock;
};
static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int pdc_common_port_start(struct ata_port *ap);
static int pdc_sata_port_start(struct ata_port *ap);
static void pdc_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc);
static void pdc_freeze(struct ata_port *ap);
static void pdc_sata_freeze(struct ata_port *ap);
static void pdc_thaw(struct ata_port *ap);
static void pdc_sata_thaw(struct ata_port *ap);
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
static void pdc_error_handler(struct ata_port *ap);
static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
static int pdc_pata_cable_detect(struct ata_port *ap);
static int pdc_sata_cable_detect(struct ata_port *ap);
static struct scsi_host_template pdc_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= PDC_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};
static const struct ata_port_operations pdc_common_ops = {
	.inherits		= &ata_sff_port_ops,

	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc_qc_prep,
	.qc_issue		= pdc_qc_issue,

	.sff_irq_clear		= pdc_irq_clear,
	.lost_interrupt		= ATA_OP_NULL,

	.post_internal_cmd	= pdc_post_internal_cmd,
	.error_handler		= pdc_error_handler,
};
static struct ata_port_operations pdc_sata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_sata_cable_detect,
	.freeze			= pdc_sata_freeze,
	.thaw			= pdc_sata_thaw,
	.scr_read		= pdc_sata_scr_read,
	.scr_write		= pdc_sata_scr_write,
	.port_start		= pdc_sata_port_start,
	.hardreset		= pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
   and ->freeze/thaw that ignore the hotplug controls. */
static struct ata_port_operations pdc_old_sata_ops = {
	.inherits		= &pdc_sata_ops,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.check_atapi_dma	= pdc_old_sata_check_atapi_dma,
};
static struct ata_port_operations pdc_pata_ops = {
	.inherits		= &pdc_common_ops,
	.cable_detect		= pdc_pata_cable_detect,
	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.port_start		= pdc_common_port_start,
	.softreset		= pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
	[board_2037x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_2037x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_20319] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_old_sata_ops,
	},

	[board_20619] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_2057x] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_SATA_PATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},

	[board_2057x_pata] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS |
				  PDC_FLAG_GEN_II,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_pata_ops,
	},

	[board_40518] =
	{
		.flags		= PDC_COMMON_FLAGS | ATA_FLAG_SATA |
				  PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_sata_ops,
	},
};
static const struct pci_device_id pdc_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
	{ PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
	{ PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },

	{ PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
	{ PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
	{ PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },

	{ PCI_VDEVICE(PROMISE, 0x6629), board_20619 },

	{ }	/* terminate list */
};
static struct pci_driver pdc_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_ata_pci_tbl,
	.probe			= pdc_ata_init_one,
	.remove			= ata_pci_remove_one,
};
static int pdc_common_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;
	int rc;

	/* we use the same prd table as bmdma, allocate it */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;

	ap->private_data = pp;

	return 0;
}
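/* Note: pp->pkt is a small (128 byte) DMA-coherent buffer; pdc_qc_prep() and
 * pdc_atapi_pkt() build the Promise command packet in it, and
 * pdc_packet_start() hands its bus address (pp->pkt_dma) to the controller
 * via PDC_PKT_SUBMIT.
 */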
static int pdc_sata_port_start(struct ata_port *ap)
{
	int rc;

	rc = pdc_common_port_start(ap);
	if (rc)
		return rc;

	/* fix up PHYMODE4 align timing */
	if (ap->flags & PDC_FLAG_GEN_II) {
		void __iomem *sata_mmio = ap->ioaddr.scr_addr;
		unsigned int tmp;

		tmp = readl(sata_mmio + PDC_PHYMODE4);
		tmp = (tmp & ~3) | 1;	/* set bits 1:0 = 0:1 */
		writel(tmp, sata_mmio + PDC_PHYMODE4);
	}

	return 0;
}
static void pdc_fpdma_clear_interrupt_flag(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u32 tmp;

	tmp = readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_DMASETUP_INT_FLAG;
	tmp |= PDC_FPDMA_CTLSTAT_SETDB_INT_FLAG;

	/* It's not allowed to write to the entire FPDMA_CTLSTAT register
	   when NCQ is running. So do a byte-sized write to bits 10 and 11. */
	writeb(tmp >> 8, sata_mmio + PDC_FPDMA_CTLSTAT + 1);
	readb(sata_mmio + PDC_FPDMA_CTLSTAT + 1); /* flush */
}
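/* Bits 10 and 11 of FPDMA_CTLSTAT live in the second byte of the 32-bit
 * register, which is why a single writeb() of (tmp >> 8) at offset +1 is
 * enough here: it updates only that byte and leaves the rest untouched.
 */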
static void pdc_fpdma_reset(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	u8 tmp;

	tmp = (u8)readl(sata_mmio + PDC_FPDMA_CTLSTAT);
	tmp |= PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	udelay(100);

	tmp &= ~PDC_FPDMA_CTLSTAT_RESET;
	writeb(tmp, sata_mmio + PDC_FPDMA_CTLSTAT);
	readl(sata_mmio + PDC_FPDMA_CTLSTAT); /* flush */

	pdc_fpdma_clear_interrupt_flag(ap);
}
static void pdc_not_at_command_packet_phase(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;
	unsigned int i;
	u32 tmp;

	/* check not at ASIC packet command phase */
	for (i = 0; i < 100; ++i) {
		writel(0, sata_mmio + PDC_INTERNAL_DEBUG_1);
		tmp = readl(sata_mmio + PDC_INTERNAL_DEBUG_2);
		if ((tmp & 0xF) != 1)
			break;
		udelay(100);
	}
}
static void pdc_clear_internal_debug_record_error_register(struct ata_port *ap)
{
	void __iomem *sata_mmio = ap->ioaddr.scr_addr;

	writel(0xffffffff, sata_mmio + PDC_SATA_ERROR);
	writel(0xffff0000, sata_mmio + PDC_LINK_LAYER_ERRORS);
}
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *ata_ctlstat_mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	if (ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(ap);

	tmp = readl(ata_ctlstat_mmio);
	tmp |= PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);

	for (i = 11; i > 0; i--) {
		tmp = readl(ata_ctlstat_mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, ata_ctlstat_mmio);
	}

	tmp &= ~PDC_RESET;
	writel(tmp, ata_ctlstat_mmio);
	readl(ata_ctlstat_mmio);	/* flush */

	if (sata_scr_valid(&ap->link) && (ap->flags & PDC_FLAG_GEN_II)) {
		pdc_fpdma_reset(ap);
		pdc_clear_internal_debug_record_error_register(ap);
	}
}
static int pdc_pata_cable_detect(struct ata_port *ap)
{
	u8 tmp;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	tmp = readb(ata_mmio + PDC_CTLSTAT + 3);
	if (tmp & 0x01)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}
static int pdc_sata_cable_detect(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
static int pdc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
static int pdc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	dma_addr_t sg_table = ap->bmdma_prd_dma;
	unsigned int cdb_len = qc->dev->cdb_len;
	u8 *cdb = qc->cdb;
	struct pdc_port_priv *pp = ap->private_data;
	u8 *buf = pp->pkt;
	__le32 *buf32 = (__le32 *) buf;
	unsigned int dev_sel, feature;

	/* set control bits (byte 0), zero delay seq id (byte 3),
	 * and seq id (byte 2)
	 */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_DMA:
		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			buf32[0] = cpu_to_le32(PDC_PKT_READ);
		else
			buf32[0] = 0;
		break;
	case ATAPI_PROT_NODATA:
		buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
		break;
	}
	buf32[1] = cpu_to_le32(sg_table);	/* S/G table addr */
	buf32[2] = 0;				/* no next-packet */

	/* select drive */
	if (sata_scr_valid(&ap->link))
		dev_sel = PDC_DEVICE_SATA;
	else
		dev_sel = qc->tf.device;

	buf[12] = (1 << 5) | ATA_REG_DEVICE;
	buf[13] = dev_sel;
	buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
	buf[15] = dev_sel; /* once more, waiting for BSY to clear */

	buf[16] = (1 << 5) | ATA_REG_NSECT;
	buf[17] = qc->tf.nsect;
	buf[18] = (1 << 5) | ATA_REG_LBAL;
	buf[19] = qc->tf.lbal;

	/* set feature and byte counter registers */
	if (qc->tf.protocol != ATAPI_PROT_DMA)
		feature = PDC_FEATURE_ATAPI_PIO;
	else
		feature = PDC_FEATURE_ATAPI_DMA;

	buf[20] = (1 << 5) | ATA_REG_FEATURE;
	buf[21] = feature;
	buf[22] = (1 << 5) | ATA_REG_BYTEL;
	buf[23] = qc->tf.lbam;
	buf[24] = (1 << 5) | ATA_REG_BYTEH;
	buf[25] = qc->tf.lbah;

	/* send ATAPI packet command 0xA0 */
	buf[26] = (1 << 5) | ATA_REG_CMD;
	buf[27] = qc->tf.command;

	/* select drive and check DRQ */
	buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
	buf[29] = dev_sel;

	/* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
	BUG_ON(cdb_len & ~0x1E);

	/* append the CDB as the final part */
	buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
	memcpy(buf+31, cdb, cdb_len);
}
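/* Rough layout of the packet built above (all offsets into pp->pkt): dword 0
 * holds the control bits, dword 1 the S/G table address, dword 2 the (unused)
 * next-packet pointer; from byte 12 onwards each pair of bytes is a
 * "register selector, value" step for the packet engine, and the CDB itself
 * is appended starting at byte 31.
 */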
/**
 *	pdc_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *	Make sure hardware does not choke on it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void pdc_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_bmdma_prd *prd = ap->bmdma_prd;
	struct scatterlist *sg;
	const u32 SG_COUNT_ASIC_BUG = 41*4;
	unsigned int si, idx;
	u32 len;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	len = le32_to_cpu(prd[idx - 1].flags_len);

	if (len > SG_COUNT_ASIC_BUG) {
		u32 addr;

		VPRINTK("Splitting last PRD.\n");

		addr = le32_to_cpu(prd[idx - 1].addr);
		prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);

		addr = addr + len - SG_COUNT_ASIC_BUG;
		len = SG_COUNT_ASIC_BUG;
		prd[idx].addr = cpu_to_le32(addr);
		prd[idx].flags_len = cpu_to_le32(len);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

		idx++;
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
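/* Example of the workaround above: with SG_COUNT_ASIC_BUG = 41*4 = 164, a
 * final PRD entry of 1024 bytes is rewritten as 860 bytes followed by a new
 * 164-byte entry, so the last PRD the ASIC sees is never longer than 164
 * bytes.  PDC_MAX_PRD is one less than LIBATA_MAX_PRD to leave room for this
 * extra entry.
 */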
static void pdc_qc_prep(struct ata_queued_cmd *qc)
{
	struct pdc_port_priv *pp = qc->ap->private_data;
	unsigned int i;

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		i = pdc_pkt_header(&qc->tf, qc->ap->bmdma_prd_dma,
				   qc->dev->devno, pp->pkt);
		if (qc->tf.flags & ATA_TFLAG_LBA48)
			i = pdc_prep_lba48(&qc->tf, pp->pkt, i);
		else
			i = pdc_prep_lba28(&qc->tf, pp->pkt, i);
		pdc_pkt_footer(&qc->tf, pp->pkt, i);
		break;

	case ATAPI_PROT_NODATA:
		pdc_atapi_pkt(qc);
		break;

	default:
		break;
	}
}
static int pdc_is_sataii_tx4(unsigned long flags)
{
	const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
	return (flags & mask) == mask;
}

static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
					  int is_sataii_tx4)
{
	static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2 };
	return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}
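/* On the SATAII TX4 the host port numbering does not match the ASIC's ATA
 * engine numbering: host ports 0..3 map to ATA engines 3, 1, 0 and 2
 * respectively; every other board uses the identity mapping.
 */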
static unsigned int pdc_sata_nr_ports(const struct ata_port *ap)
{
	return (ap->flags & PDC_FLAG_4_PORTS) ? 4 : 2;
}

static unsigned int pdc_sata_ata_port_to_ata_no(const struct ata_port *ap)
{
	const struct ata_host *host = ap->host;
	unsigned int nr_ports = pdc_sata_nr_ports(ap);
	unsigned int i;

	for (i = 0; i < nr_ports && host->ports[i] != ap; ++i)
		;
	BUG_ON(i >= nr_ports);
	return pdc_port_no_to_ata_no(i, pdc_is_sataii_tx4(ap->flags));
}
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp |= PDC_IRQ_DISABLE;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_sata_freeze(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	/* Disable hotplug events on this port.
	 *
	 * Locking:
	 * 1) hotplug register accesses must be serialised via host->lock
	 * 2) ap->lock == &ap->host->lock
	 * 3) ->freeze() and ->thaw() are called with ap->lock held
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << (ata_no + 16);
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */

	pdc_freeze(ap);
}
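/* In the plug CSR the low bits latch plug/unplug events for each ATA engine
 * (hence the two-bit 0x11 pattern per port), and the bits 16 positions higher
 * appear to be the matching mask bits: freeze sets the mask bits, thaw clears
 * them and writes the event bits back to clear anything already latched.
 */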
static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* clear IRQ */
	readl(ata_mmio + PDC_COMMAND);

	/* turn IRQ back on */
	tmp = readl(ata_mmio + PDC_CTLSTAT);
	tmp &= ~PDC_IRQ_DISABLE;
	writel(tmp, ata_mmio + PDC_CTLSTAT);
	readl(ata_mmio + PDC_CTLSTAT); /* flush */
}

static void pdc_sata_thaw(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int hotplug_offset = PDC2_SATA_PLUG_CSR;
	unsigned int ata_no = pdc_sata_ata_port_to_ata_no(ap);
	u32 hotplug_status;

	pdc_thaw(ap);

	/* Enable hotplug events on this port.
	 * Locking: see pdc_sata_freeze().
	 */
	hotplug_status = readl(host_mmio + hotplug_offset);
	hotplug_status |= 0x11 << ata_no;
	hotplug_status &= ~(0x11 << (ata_no + 16));
	writel(hotplug_status, host_mmio + hotplug_offset);
	readl(host_mmio + hotplug_offset); /* flush */
}
static int pdc_pata_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	pdc_reset_port(link->ap);
	return ata_sff_softreset(link, class, deadline);
}

static unsigned int pdc_ata_port_to_ata_no(const struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];

	/* ata_mmio == host_mmio + 0x200 + ata_no * 0x80 */
	return (ata_mmio - host_mmio - 0x200) / 0x80;
}
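/* Example: the second ATA engine's registers live at host_mmio + 0x280, so
 * (0x280 - 0x200) / 0x80 = 1.  This simply inverts the ata_offset
 * computation done in pdc_ata_init_one().
 */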
static void pdc_hard_reset_port(struct ata_port *ap)
{
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
	unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
	struct pdc_host_priv *hpriv = ap->host->private_data;
	u8 tmp;

	spin_lock(&hpriv->hard_reset_lock);

	tmp = readb(pcictl_b1_mmio);
	tmp &= ~(0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */
	udelay(100);
	tmp |= (0x10 << ata_no);
	writeb(tmp, pcictl_b1_mmio);
	readb(pcictl_b1_mmio); /* flush */

	spin_unlock(&hpriv->hard_reset_lock);
}

static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	if (link->ap->flags & PDC_FLAG_GEN_II)
		pdc_not_at_command_packet_phase(link->ap);
	/* hotplug IRQs should have been masked by pdc_sata_freeze() */
	pdc_hard_reset_port(link->ap);
	pdc_reset_port(link->ap);

	/* sata_promise can't reliably acquire the first D2H Reg FIS
	 * after hardreset.  Do non-waiting hardreset and request
	 * follow-up SRST.
	 */
	return sata_std_hardreset(link, class, deadline);
}
static void pdc_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		pdc_reset_port(ap);

	ata_sff_error_handler(ap);
}

static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		pdc_reset_port(ap);
}
static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
			   u32 port_status, u32 err_mask)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int ac_err_mask = 0;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "port_status 0x%08x", port_status);
	port_status &= err_mask;

	if (port_status & PDC_DRIVE_ERR)
		ac_err_mask |= AC_ERR_DEV;
	if (port_status & (PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR))
		ac_err_mask |= AC_ERR_OTHER;
	if (port_status & (PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR))
		ac_err_mask |= AC_ERR_ATA_BUS;
	if (port_status & (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC2_HTO_ERR
			   | PDC_PCI_SYS_ERR | PDC1_PCI_PARITY_ERR))
		ac_err_mask |= AC_ERR_HOST_BUS;

	if (sata_scr_valid(&ap->link)) {
		u32 serror;

		pdc_sata_scr_read(&ap->link, SCR_ERROR, &serror);
		ehi->serror |= serror;
	}

	qc->err_mask |= ac_err_mask;

	pdc_reset_port(ap);

	ata_port_abort(ap);
}
static unsigned int pdc_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	unsigned int handled = 0;
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	u32 port_status, err_mask;

	err_mask = PDC_ERR_MASK;
	if (ap->flags & PDC_FLAG_GEN_II)
		err_mask &= ~PDC1_ERR_MASK;
	else
		err_mask &= ~PDC2_ERR_MASK;
	port_status = readl(ata_mmio + PDC_GLOBAL_CTL);
	if (unlikely(port_status & err_mask)) {
		pdc_error_intr(ap, qc, port_status, err_mask);
		return 1;
	}

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NODATA:
	case ATAPI_PROT_DMA:
	case ATAPI_PROT_NODATA:
		qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
		ata_qc_complete(qc);
		handled = 1;
		break;
	default:
		ap->stats.idle_irq++;
		break;
	}

	return handled;
}

static void pdc_irq_clear(struct ata_port *ap)
{
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;

	readl(ata_mmio + PDC_COMMAND);
}
static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp;
	unsigned int handled = 0;
	void __iomem *host_mmio;
	unsigned int hotplug_offset, ata_no;
	u32 hotplug_status;
	int is_sataii_tx4;

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	host_mmio = host->iomap[PDC_MMIO_BAR];

	spin_lock(&host->lock);

	/* read and clear hotplug flags for all ports */
	if (host->ports[0]->flags & PDC_FLAG_GEN_II) {
		hotplug_offset = PDC2_SATA_PLUG_CSR;
		hotplug_status = readl(host_mmio + hotplug_offset);
		if (hotplug_status & 0xff)
			writel(hotplug_status | 0xff, host_mmio + hotplug_offset);
		hotplug_status &= 0xff;	/* clear uninteresting bits */
	} else
		hotplug_status = 0;

	/* reading should also clear interrupts */
	mask = readl(host_mmio + PDC_INT_SEQMASK);

	if (mask == 0xffffffff && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 2\n");
		goto done_irq;
	}

	mask &= 0xffff;		/* only 16 SEQIDs possible */
	if (mask == 0 && hotplug_status == 0) {
		VPRINTK("QUICK EXIT 3\n");
		goto done_irq;
	}

	writel(mask, host_mmio + PDC_INT_SEQMASK);

	is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);

	for (i = 0; i < host->n_ports; i++) {
		VPRINTK("port %u\n", i);
		ap = host->ports[i];

		/* check for a plug or unplug event */
		ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		tmp = hotplug_status & (0x11 << ata_no);
		if (tmp) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_hotplugged(ehi);
			ata_ehi_push_desc(ehi, "hotplug_status %#x", tmp);
			ata_port_freeze(ap);
			++handled;
			continue;
		}

		/* check for a packet interrupt */
		tmp = mask & (1 << (i + 1));
		if (tmp) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc_host_intr(ap, qc);
		}
	}

done_irq:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
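/* The per-port SEQ interrupt bit tested above, mask & (1 << (i + 1)), matches
 * the sequence ID that pdc_packet_start() assigns to each port:
 * seq = port_no + 1.
 */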
static void pdc_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *ata_mmio = ap->ioaddr.cmd_addr;
	unsigned int port_no = ap->port_no;
	u8 seq = (u8) (port_no + 1);

	VPRINTK("ENTER, ap %p\n", ap);

	writel(0x00000001, host_mmio + (seq * 4));
	readl(host_mmio + (seq * 4));	/* flush */

	pp->pkt[2] = seq;
	wmb();			/* flush PRD, pkt writes */
	writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT);
	readl(ata_mmio + PDC_PKT_SUBMIT); /* flush */
}
static unsigned int pdc_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATAPI_PROT_NODATA:
		if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
			break;
		fallthrough;
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		fallthrough;
	case ATAPI_PROT_DMA:
	case ATA_PROT_DMA:
		pdc_packet_start(qc);
		return 0;
	default:
		break;
	}
	return ata_sff_qc_issue(qc);
}
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}

static void pdc_exec_command_mmio(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA || tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}
static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
{
	u8 *scsicmd = qc->scsicmd->cmnd;
	int pio = 1; /* atapi dma off by default */

	/* Whitelist commands that may use DMA. */
	switch (scsicmd[0]) {
	case WRITE_12:
	case WRITE_10:
	case WRITE_6:
	case READ_12:
	case READ_10:
	case READ_6:
	case 0xad: /* READ_DVD_STRUCTURE */
	case 0xbe: /* READ_CD */
		pio = 0;
		break;
	}
	/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
	if (scsicmd[0] == WRITE_10) {
		unsigned int lba =
			(scsicmd[2] << 24) |
			(scsicmd[3] << 16) |
			(scsicmd[4] << 8) |
			scsicmd[5];
		if (lba >= 0xFFFF4FA2)
			pio = 1;
	}

	return pio;
}

static int pdc_old_sata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* First generation chips cannot use ATAPI DMA on SATA ports */
	return 1;
}
static void pdc_ata_setup_port(struct ata_port *ap,
			       void __iomem *base, void __iomem *scr_addr)
{
	ap->ioaddr.cmd_addr		= base;
	ap->ioaddr.data_addr		= base;
	ap->ioaddr.feature_addr		=
	ap->ioaddr.error_addr		= base + 0x4;
	ap->ioaddr.nsect_addr		= base + 0x8;
	ap->ioaddr.lbal_addr		= base + 0xc;
	ap->ioaddr.lbam_addr		= base + 0x10;
	ap->ioaddr.lbah_addr		= base + 0x14;
	ap->ioaddr.device_addr		= base + 0x18;
	ap->ioaddr.command_addr		=
	ap->ioaddr.status_addr		= base + 0x1c;
	ap->ioaddr.altstatus_addr	=
	ap->ioaddr.ctl_addr		= base + 0x38;
	ap->ioaddr.scr_addr		= scr_addr;
}
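/* These taskfile offsets mirror the per-port register map in the enum above
 * (PDC_FEATURE = 0x04 ... PDC_ALTSTATUS = 0x38), so the generic SFF helpers
 * can use ap->ioaddr directly on top of the Promise MMIO window.
 */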
static void pdc_host_init(struct ata_host *host)
{
	void __iomem *host_mmio = host->iomap[PDC_MMIO_BAR];
	int is_gen2 = host->ports[0]->flags & PDC_FLAG_GEN_II;
	int hotplug_offset;
	u32 tmp;

	if (is_gen2)
		hotplug_offset = PDC2_SATA_PLUG_CSR;
	else
		hotplug_offset = PDC_SATA_PLUG_CSR;

	/*
	 * Except for the hotplug stuff, this is voodoo from the
	 * Promise driver.  Label this entire section
	 * "TODO: figure out why we do this"
	 */

	/* enable BMR_BURST, maybe change FIFO_SHD to 8 dwords */
	tmp = readl(host_mmio + PDC_FLASH_CTL);
	tmp |= 0x02000;	/* bit 13 (enable bmr burst) */
	if (!is_gen2)
		tmp |= 0x10000;	/* bit 16 (fifo threshold at 8 dw) */
	writel(tmp, host_mmio + PDC_FLASH_CTL);

	/* clear plug/unplug flags for all ports */
	tmp = readl(host_mmio + hotplug_offset);
	writel(tmp | 0xff, host_mmio + hotplug_offset);

	tmp = readl(host_mmio + hotplug_offset);
	if (is_gen2)	/* unmask plug/unplug ints */
		writel(tmp & ~0xff0000, host_mmio + hotplug_offset);
	else		/* mask plug/unplug ints */
		writel(tmp | 0xff0000, host_mmio + hotplug_offset);

	/* don't initialise TBG or SLEW on 2nd generation chips */
	if (is_gen2)
		return;

	/* reduce TBG clock to 133 Mhz. */
	tmp = readl(host_mmio + PDC_TBG_MODE);
	tmp &= ~0x30000; /* clear bit 17, 16 */
	tmp |= 0x10000;  /* set bit 17:16 = 0:1 */
	writel(tmp, host_mmio + PDC_TBG_MODE);

	readl(host_mmio + PDC_TBG_MODE);	/* flush */

	/* adjust slew rate control register. */
	tmp = readl(host_mmio + PDC_SLEW_CTL);
	tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */
	tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */
	writel(tmp, host_mmio + PDC_SLEW_CTL);
}
static int pdc_ata_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
	const struct ata_port_info *ppi[PDC_MAX_PORTS];
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	void __iomem *host_mmio;
	int n_ports, i, rc;
	int is_sataii_tx4;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* enable and acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host_mmio = pcim_iomap_table(pdev)[PDC_MMIO_BAR];

	/* determine port configuration and setup host */
	n_ports = 2;
	if (pi->flags & PDC_FLAG_4_PORTS)
		n_ports = 4;
	for (i = 0; i < n_ports; i++)
		ppi[i] = pi;

	if (pi->flags & PDC_FLAG_SATA_PATA) {
		u8 tmp = readb(host_mmio + PDC_FLASH_CTL + 1);
		if (!(tmp & 0x80))
			ppi[n_ports++] = pi + 1;
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}
	hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	spin_lock_init(&hpriv->hard_reset_lock);
	host->private_data = hpriv;
	host->iomap = pcim_iomap_table(pdev);

	is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int ata_no = pdc_port_no_to_ata_no(i, is_sataii_tx4);
		unsigned int ata_offset = 0x200 + ata_no * 0x80;
		unsigned int scr_offset = 0x400 + ata_no * 0x100;

		pdc_ata_setup_port(ap, host_mmio + ata_offset, host_mmio + scr_offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, ata_offset, "ata");
	}

	/* initialize adapter */
	pdc_host_init(host);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* start host, request IRQ and attach */
	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc_interrupt, IRQF_SHARED,
				 &pdc_ata_sht);
}

module_pci_driver(pdc_ata_pci_driver);
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise ATA TX2/TX4/TX4000 low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);