/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  src.c
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);
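
/*
 * Interrupt handler shared by the legacy INTx path and every MSI-X vector.
 * It decodes the outbound doorbell (or the per-vector MSI-X status) to work
 * out whether a sync command completion, an AIF event, or normal FIB
 * completions on this vector's host RRQ need to be serviced.
 */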
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				up(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}
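
	/*
	 * Each host RRQ entry carries a toggle bit in bit 31, a fast
	 * response flag in bit 30 and the FIB handle in the low bits
	 * (shifted left by two); a zero entry means the ring slot is
	 * still empty.
	 */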
	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			handle >>= 2;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
		mode = 0;
	}

	return IRQ_HANDLED;
}

/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */
static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 */
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status returned from Mailbox 0
 *	@r1: return value from Mailbox 1
 *	@r2: return value from Mailbox 2
 *	@r3: return value from Mailbox 3
 *	@r4: return value from Mailbox 4
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */
static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
	    !dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
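
/*
 * Example of the sync mailbox calling convention (illustrative only, not
 * called from here; the returned mailbox meanings depend on the command):
 *
 *	u32 status;
 *
 *	if (src_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
 *			 0, 0, 0, 0, 0, 0,
 *			 &status, NULL, NULL, NULL, NULL))
 *		dev_err(&dev->pdev->dev, "sync command timed out\n");
 */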

/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */
static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */
static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */
static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
		/* We can only use a 32 bit address here */
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}
}

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		goto err_blink;

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		goto err_out;

	/*
	 *	Check to see if the monitor panic'd.
	 */
	if (unlikely(status & MONITOR_PANIC))
		goto err_out;

	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;

	/*
	 *	Everything is OK
	 */
	return 0;

err_out:
	return -1;

err_blink:
	return (status >> 16) & 0xFF;
}
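
/*
 * Pick the next MSI-X vector in round-robin order so that outstanding
 * requests are spread evenly across the adapter's reply queues.
 */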
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
}

/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;

	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
						HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);
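
	/*
	 * The inbound queue register takes the 128-byte-aligned FIB address
	 * with the request size (in 128-byte units, minus one, capped at 31)
	 * encoded in the low bits of that address.
	 */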
	if (native_hba) {
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}

/**
 *	aac_src_ioremap
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 *	aac_srcv_ioremap
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
		ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

void aac_set_intx_mode(struct aac_dev *dev)
{
	if (dev->msi_enabled) {
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
		dev->msi_enabled = 0;
		msleep(5000); /* Delay 5 seconds */
	}
}

static void aac_clear_omr(struct aac_dev *dev)
{
	u32 omr_value = 0;

	omr_value = src_readl(dev, MUnit.OMR);

	/*
	 * Check for PCI Errors or Kernel Panic
	 */
	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
		omr_value = 0;

	/*
	 * Preserve MSIX Value if any
	 */
	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
	src_readl(dev, MUnit.OMR);
}

static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	if (!aac_fib_dump)
		return;

	supported_options3 = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}

static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
						NULL, NULL, NULL, NULL);
}

static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);
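
	/*
	 * Kick the IOP reset itself by writing the reset mask to the inbound
	 * doorbell register, then give the firmware time to come back up.
	 */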
	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}

static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u_int32_t val;

	aac_clear_omr(dev);
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has no effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Wait (with a delay) until the controller reports that it
		 * is up and running
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}

/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *  aac_src_init	-	initialize a Cardinal Frey Bar card
 *  @dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}

	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;

	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;

	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}

/**
 *	aac_src_soft_reset	-	perform soft reset to speed up
 *	access
 *
 *	Assumptions: That the controller is in a state where we can
 *	bring it back to life with an init struct. We can only use
 *	fast sync commands, as the timeout is 5 seconds.
 *
 *	@dev: device to configure
 *
 */
static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;	/* pcie hosed */

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;	/* not up and running */

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4);

	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}

/**
 *  aac_srcv_init	-	initialize an SRCv card
 *  @dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			 !(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}

	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		if (status == INVALID_OMR)
			status = 0;

		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
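
/*
 * aac_src_access_devreg() drives the SRC interrupt control registers: the
 * OIMR mask enables or disables outbound interrupts, while individual IDR
 * bits (4: clear sync, 5: clear AIF, 6: MSI-X enable, 7: INTx enable, per
 * the cases below) acknowledge events and select the delivery mode.
 */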
void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u_int32_t val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}

static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int msix_val = 0;
	int legacy_val = 0;

	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;

	if (!dev->msi_enabled) {
		/*
		 * If the legacy interrupt status indicates the command is not
		 * complete, sample the MSI-X register to see if it indicates
		 * command completion; if it does, switch the controller to
		 * MSI-X mode and consider the command completed.
		 */
		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
		if (!(legacy_val & 1) && msix_val)
			dev->msi_enabled = 1;
		return legacy_val;
	}

	return msix_val;
}