drivers/ata/sata_vsc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
 *
 *  Maintained by:  Jeremy Higdon @ SGI
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2004 SGI
 *
 *  Bits from Jeff Garzik, Copyright RedHat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Vitesse hardware documentation presumably available under NDA.
 *  Intel 31244 (same hardware interface) documentation presumably
 *  available from http://developer.intel.com/
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_vsc"
#define DRV_VERSION	"2.3"

enum {
        VSC_MMIO_BAR = 0,

        /* Interrupt register offsets (from chip base address) */
        VSC_SATA_INT_STAT_OFFSET = 0x00,
        VSC_SATA_INT_MASK_OFFSET = 0x04,

        /* Taskfile registers offsets */
        VSC_SATA_TF_CMD_OFFSET = 0x00,
        VSC_SATA_TF_DATA_OFFSET = 0x00,
        VSC_SATA_TF_ERROR_OFFSET = 0x04,
        VSC_SATA_TF_FEATURE_OFFSET = 0x06,
        VSC_SATA_TF_NSECT_OFFSET = 0x08,
        VSC_SATA_TF_LBAL_OFFSET = 0x0c,
        VSC_SATA_TF_LBAM_OFFSET = 0x10,
        VSC_SATA_TF_LBAH_OFFSET = 0x14,
        VSC_SATA_TF_DEVICE_OFFSET = 0x18,
        VSC_SATA_TF_STATUS_OFFSET = 0x1c,
        VSC_SATA_TF_COMMAND_OFFSET = 0x1d,
        VSC_SATA_TF_ALTSTATUS_OFFSET = 0x28,
        VSC_SATA_TF_CTL_OFFSET = 0x29,

        /* DMA base */
        VSC_SATA_UP_DESCRIPTOR_OFFSET = 0x64,
        VSC_SATA_UP_DATA_BUFFER_OFFSET = 0x6C,
        VSC_SATA_DMA_CMD_OFFSET = 0x70,

        /* SCRs base */
        VSC_SATA_SCR_STATUS_OFFSET = 0x100,
        VSC_SATA_SCR_ERROR_OFFSET = 0x104,
        VSC_SATA_SCR_CONTROL_OFFSET = 0x108,

        /* Port stride */
        VSC_SATA_PORT_OFFSET = 0x200,

        /* Error interrupt status bit offsets */
        VSC_SATA_INT_ERROR_CRC = 0x40,
        VSC_SATA_INT_ERROR_T = 0x20,
        VSC_SATA_INT_ERROR_P = 0x10,
        VSC_SATA_INT_ERROR_R = 0x8,
        VSC_SATA_INT_ERROR_E = 0x4,
        VSC_SATA_INT_ERROR_M = 0x2,
        VSC_SATA_INT_PHY_CHANGE = 0x1,
        VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC | VSC_SATA_INT_ERROR_T | \
                              VSC_SATA_INT_ERROR_P | VSC_SATA_INT_ERROR_R | \
                              VSC_SATA_INT_ERROR_E | VSC_SATA_INT_ERROR_M | \
                              VSC_SATA_INT_PHY_CHANGE),
};
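
/*
 * The SCRs are memory-mapped per port: scr_addr points at SStatus, with
 * SError and SControl following at 4-byte strides, so the generic sc_reg
 * index can simply be scaled by 4.
 */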
static int vsc_sata_scr_read(struct ata_link *link,
                             unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}

static int vsc_sata_scr_write(struct ata_link *link,
                              unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
        return 0;
}
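
/*
 * Each port owns one byte in the chip-wide interrupt mask register:
 * freezing a port clears its byte to mask every interrupt source for that
 * port, thawing writes 0xff to unmask them all again.
 */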
static void vsc_freeze(struct ata_port *ap)
{
        void __iomem *mask_addr;

        mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
                VSC_SATA_INT_MASK_OFFSET + ap->port_no;

        writeb(0, mask_addr);
}

static void vsc_thaw(struct ata_port *ap)
{
        void __iomem *mask_addr;

        mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
                VSC_SATA_INT_MASK_OFFSET + ap->port_no;

        writeb(0xff, mask_addr);
}
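
/*
 * Keep the port's interrupt mask byte in sync with the ATA_NIEN bit of the
 * device control value; only bit 7 of the mask byte is touched here.
 */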
static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
        void __iomem *mask_addr;
        u8 mask;

        mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
                VSC_SATA_INT_MASK_OFFSET + ap->port_no;
        mask = readb(mask_addr);
        if (ctl & ATA_NIEN)
                mask |= 0x80;
        else
                mask &= 0x7F;
        writeb(mask, mask_addr);
}
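
/*
 * The taskfile shadow registers are accessed as 16-bit quantities here, so
 * for LBA48 commands the HOB byte is written together with the current byte
 * in a single writew().
 */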
static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

        /*
         * The only thing the ctl register is used for is SRST.
         * That is not enabled or disabled via tf_load.
         * However, if ATA_NIEN is changed, then we need to change
         * the interrupt register.
         */
        if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
                ap->last_ctl = tf->ctl;
                vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
        }
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
                writew(tf->feature | (((u16)tf->hob_feature) << 8),
                       ioaddr->feature_addr);
                writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
                       ioaddr->nsect_addr);
                writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
                       ioaddr->lbal_addr);
                writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
                       ioaddr->lbam_addr);
                writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
                       ioaddr->lbah_addr);
        } else if (is_addr) {
                writew(tf->feature, ioaddr->feature_addr);
                writew(tf->nsect, ioaddr->nsect_addr);
                writew(tf->lbal, ioaddr->lbal_addr);
                writew(tf->lbam, ioaddr->lbam_addr);
                writew(tf->lbah, ioaddr->lbah_addr);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                writeb(tf->device, ioaddr->device_addr);

        ata_wait_idle(ap);
}
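
/*
 * The 16-bit reads return the current value in the low byte and the HOB
 * value in the high byte, so one readw() recovers both halves of each
 * LBA48 field.
 */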
static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u16 nsect, lbal, lbam, lbah, feature;

        tf->command = ata_sff_check_status(ap);
        tf->device = readw(ioaddr->device_addr);
        feature = readw(ioaddr->error_addr);
        nsect = readw(ioaddr->nsect_addr);
        lbal = readw(ioaddr->lbal_addr);
        lbam = readw(ioaddr->lbam_addr);
        lbah = readw(ioaddr->lbah_addr);

        tf->feature = feature;
        tf->nsect = nsect;
        tf->lbal = lbal;
        tf->lbam = lbam;
        tf->lbah = lbah;

        if (tf->flags & ATA_TFLAG_LBA48) {
                tf->hob_feature = feature >> 8;
                tf->hob_nsect = nsect >> 8;
                tf->hob_lbal = lbal >> 8;
                tf->hob_lbam = lbam >> 8;
                tf->hob_lbah = lbah >> 8;
        }
}

static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
{
        if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
}
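
/*
 * Per-port interrupt handling: error and PHY-change conditions are routed
 * to EH via the freeze/abort helper above, everything else is handed to
 * the standard BMDMA completion path.
 */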
static void vsc_port_intr(u8 port_status, struct ata_port *ap)
{
        struct ata_queued_cmd *qc;
        int handled = 0;

        if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
                vsc_error_intr(port_status, ap);
                return;
        }

        qc = ata_qc_from_tag(ap, ap->link.active_tag);
        if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
                handled = ata_bmdma_port_intr(ap, qc);

        /* We received an interrupt during a polled command,
         * or some other spurious condition.  Interrupt reporting
         * with this hardware is fairly reliable so it is safe to
         * simply clear the interrupt.
         */
        if (unlikely(!handled))
                ap->ops->sff_check_status(ap);
}

/*
 * vsc_sata_interrupt
 *
 * Read the interrupt register and process for the devices that have
 * them pending.
 */
static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        u32 status;

        status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);

        if (unlikely(status == 0xffffffff || status == 0)) {
                if (status)
                        dev_err(host->dev,
                                ": IRQ status == 0xffffffff, PCI fault or device removal?\n");
                goto out;
        }

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                u8 port_status = (status >> (8 * i)) & 0xff;
                if (port_status) {
                        vsc_port_intr(port_status, host->ports[i]);
                        handled++;
                }
        }

        spin_unlock(&host->lock);
out:
        return IRQ_RETVAL(handled);
}

static struct scsi_host_template vsc_sata_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations vsc_sata_ops = {
        .inherits = &ata_bmdma_port_ops,
        /* The IRQ handling is not quite standard SFF behaviour so we
           cannot use the default lost interrupt handler */
        .lost_interrupt = ATA_OP_NULL,
        .sff_tf_load = vsc_sata_tf_load,
        .sff_tf_read = vsc_sata_tf_read,
        .freeze = vsc_freeze,
        .thaw = vsc_thaw,
        .scr_read = vsc_sata_scr_read,
        .scr_write = vsc_sata_scr_write,
};
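
/*
 * Fill in the taskfile, BMDMA and SCR addresses for one port (they all live
 * in the same per-port register window) and zero the UP descriptor and data
 * buffer registers.
 */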
static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
        port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
        port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
        port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET;
        port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET;
        port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET;
        port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET;
        port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET;
        port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET;
        port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET;
        port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET;
        port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET;
        port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET;
        port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
        port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
        port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
        writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
        writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}
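
/*
 * Probe one VSC7174/Intel 31244 function.  The driver uses a single MMIO
 * BAR (BAR 0); the per-port register blocks start VSC_SATA_PORT_OFFSET
 * (0x200) into it and are 0x200 apart, hence the (i + 1) scaling below.
 */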
static int vsc_sata_init_one(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        static const struct ata_port_info pi = {
                .flags = ATA_FLAG_SATA,
                .pio_mask = ATA_PIO4,
                .mwdma_mask = ATA_MWDMA2,
                .udma_mask = ATA_UDMA6,
                .port_ops = &vsc_sata_ops,
        };
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ata_host *host;
        void __iomem *mmio_base;
        int i, rc;
        u8 cls;

        ata_print_version_once(&pdev->dev, DRV_VERSION);

        /* allocate host */
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
        if (!host)
                return -ENOMEM;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* check if we have needed resource mapped */
        if (pci_resource_len(pdev, 0) == 0)
                return -ENODEV;

        /* map IO regions and initialize host accordingly */
        rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        mmio_base = host->iomap[VSC_MMIO_BAR];

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;

                vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);

                ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
        }

        /*
         * Use 32 bit DMA mask, because 64 bit address support is poor.
         */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        /*
         * Due to a bug in the chip, the default cache line size can't be
         * used (unless the default is non-zero).
         */
        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
        if (cls == 0x00)
                pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);

        if (pci_enable_msi(pdev) == 0)
                pci_intx(pdev, 0);

        /*
         * Config offset 0x98 is "Extended Control and Status Register 0"
         * Default value is (1 << 28).  All bits except bit 28 are reserved in
         * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
         * If bit 28 is clear, each port has its own LED.
         */
        pci_write_config_dword(pdev, 0x98, 0);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
                                 IRQF_SHARED, &vsc_sata_sht);
}
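
/*
 * Match on vendor/device ID plus the SATA class code (base class 0x01,
 * subclass 0x06, prog-if 0x00), presumably so the device only binds here
 * when it is configured for this interface.
 */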
static const struct pci_device_id vsc_sata_pci_tbl[] = {
        { PCI_VENDOR_ID_VITESSE, 0x7174,
          PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
        { PCI_VENDOR_ID_INTEL, 0x3200,
          PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },

        { }     /* terminate list */
};

static struct pci_driver vsc_sata_pci_driver = {
        .name = DRV_NAME,
        .id_table = vsc_sata_pci_tbl,
        .probe = vsc_sata_init_one,
        .remove = ata_pci_remove_one,
};

module_pci_driver(vsc_sata_pci_driver);

MODULE_AUTHOR("Jeremy Higdon");
MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);