drivers/scsi/aacraid/src.c
1 /*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc.
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
8 * Copyright (c) 2000-2010 Adaptec, Inc.
9 * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
10 * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * Module Name:
27 * src.c
29 * Abstract: Hardware Device Interface for PMC SRC based controllers
33 #include <linux/kernel.h>
34 #include <linux/init.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/spinlock.h>
38 #include <linux/slab.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/completion.h>
42 #include <linux/time.h>
43 #include <linux/interrupt.h>
44 #include <scsi/scsi_host.h>
46 #include "aacraid.h"
48 static int aac_src_get_sync_status(struct aac_dev *dev);
50 static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
52 struct aac_msix_ctx *ctx;
53 struct aac_dev *dev;
54 unsigned long bellbits, bellbits_shifted;
55 int vector_no;
56 int isFastResponse, mode;
57 u32 index, handle;
59 ctx = (struct aac_msix_ctx *)dev_id;
60 dev = ctx->dev;
61 vector_no = ctx->vector_no;
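/*
 * Work out what raised the interrupt.  In MSI/MSI-X mode vector 0 also
 * carries the AIF and sync-command doorbells, reported through ODR_MSI;
 * in legacy INTx mode the doorbell bits come from ODR_R, are acknowledged
 * through ODR_C, and the shifted bits say whether this is an AIF or a
 * sync-command completion rather than a normal response.
 */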
63 if (dev->msi_enabled) {
64 mode = AAC_INT_MODE_MSI;
65 if (vector_no == 0) {
66 bellbits = src_readl(dev, MUnit.ODR_MSI);
67 if (bellbits & 0x40000)
68 mode |= AAC_INT_MODE_AIF;
69 if (bellbits & 0x1000)
70 mode |= AAC_INT_MODE_SYNC;
72 } else {
73 mode = AAC_INT_MODE_INTX;
74 bellbits = src_readl(dev, MUnit.ODR_R);
75 if (bellbits & PmDoorBellResponseSent) {
76 bellbits = PmDoorBellResponseSent;
77 src_writel(dev, MUnit.ODR_C, bellbits);
78 src_readl(dev, MUnit.ODR_C);
79 } else {
80 bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
81 src_writel(dev, MUnit.ODR_C, bellbits);
82 src_readl(dev, MUnit.ODR_C);
84 if (bellbits_shifted & DoorBellAifPending)
85 mode |= AAC_INT_MODE_AIF;
86 else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
87 mode |= AAC_INT_MODE_SYNC;
91 if (mode & AAC_INT_MODE_SYNC) {
92 unsigned long sflags;
93 struct list_head *entry;
94 int send_it = 0;
95 extern int aac_sync_mode;
97 if (!aac_sync_mode && !dev->msi_enabled) {
98 src_writel(dev, MUnit.ODR_C, bellbits);
99 src_readl(dev, MUnit.ODR_C);
102 if (dev->sync_fib) {
103 if (dev->sync_fib->callback)
104 dev->sync_fib->callback(dev->sync_fib->callback_data,
105 dev->sync_fib);
106 spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
107 if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
108 dev->management_fib_count--;
109 complete(&dev->sync_fib->event_wait);
111 spin_unlock_irqrestore(&dev->sync_fib->event_lock,
112 sflags);
113 spin_lock_irqsave(&dev->sync_lock, sflags);
114 if (!list_empty(&dev->sync_fib_list)) {
115 entry = dev->sync_fib_list.next;
116 dev->sync_fib = list_entry(entry,
117 struct fib,
118 fiblink);
119 list_del(entry);
120 send_it = 1;
121 } else {
122 dev->sync_fib = NULL;
124 spin_unlock_irqrestore(&dev->sync_lock, sflags);
125 if (send_it) {
126 aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
127 (u32)dev->sync_fib->hw_fib_pa,
128 0, 0, 0, 0, 0,
129 NULL, NULL, NULL, NULL, NULL);
132 if (!dev->msi_enabled)
133 mode = 0;
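/*
 * AIF (adapter-initiated FIB) handling: with sa_firmware the event code
 * is read from scratchpad SCR0, passed to aac_intr_normal(), echoed back
 * through Mailbox[0] and acknowledged by setting IDR bit 23; older
 * firmware just gets the AIF thread kicked.
 */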
137 if (mode & AAC_INT_MODE_AIF) {
138 /* handle AIF */
139 if (dev->sa_firmware) {
140 u32 events = src_readl(dev, MUnit.SCR0);
142 aac_intr_normal(dev, events, 1, 0, NULL);
143 writel(events, &dev->IndexRegs->Mailbox[0]);
144 src_writel(dev, MUnit.IDR, 1 << 23);
145 } else {
146 if (dev->aif_thread && dev->fsa_dev)
147 aac_intr_normal(dev, 0, 2, 0, NULL);
149 if (dev->msi_enabled)
150 aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
151 mode = 0;
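/*
 * Normal response path: every vector owns a vector_cap sized window of
 * host_rrq and consumes entries until a zero handle is seen.  Bit 31 is
 * the toggle bit and bit 30 marks a fast-response completion.
 */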
154 if (mode) {
155 index = dev->host_rrq_idx[vector_no];
157 for (;;) {
158 isFastResponse = 0;
159 /* remove toggle bit (31) */
160 handle = le32_to_cpu((dev->host_rrq[index])
161 & 0x7fffffff);
162 /* check fast response bits (30, 1) */
163 if (handle & 0x40000000)
164 isFastResponse = 1;
165 handle &= 0x0000ffff;
166 if (handle == 0)
167 break;
168 handle >>= 2;
169 if (dev->msi_enabled && dev->max_msix > 1)
170 atomic_dec(&dev->rrq_outstanding[vector_no]);
171 aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
172 dev->host_rrq[index++] = 0;
173 if (index == (vector_no + 1) * dev->vector_cap)
174 index = vector_no * dev->vector_cap;
175 dev->host_rrq_idx[vector_no] = index;
177 mode = 0;
180 return IRQ_HANDLED;
184 * aac_src_disable_interrupt - Disable interrupts
185 * @dev: Adapter
188 static void aac_src_disable_interrupt(struct aac_dev *dev)
190 src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
194 * aac_src_enable_interrupt_message - Enable interrupts
195 * @dev: Adapter
198 static void aac_src_enable_interrupt_message(struct aac_dev *dev)
200 aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
204 * src_sync_cmd - send a command and wait
205 * @dev: Adapter
206 * @command: Command to execute
207 * @p1: first parameter
208 * @ret: adapter status
210 * This routine will send a synchronous command to the adapter and wait
211 * for its completion.
214 static int src_sync_cmd(struct aac_dev *dev, u32 command,
215 u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
216 u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
218 unsigned long start;
219 unsigned long delay;
220 int ok;
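/*
 * Handshake: the command goes into Mailbox 0 and its parameters into the
 * following mailboxes, inbound doorbell 0 is rung through IDR, then the
 * sync status is polled for outbound doorbell 0 and the results are read
 * back from the mailboxes.
 */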
223 * Write the command into Mailbox 0
225 writel(command, &dev->IndexRegs->Mailbox[0]);
227 * Write the parameters into Mailboxes 1 - 4
229 writel(p1, &dev->IndexRegs->Mailbox[1]);
230 writel(p2, &dev->IndexRegs->Mailbox[2]);
231 writel(p3, &dev->IndexRegs->Mailbox[3]);
232 writel(p4, &dev->IndexRegs->Mailbox[4]);
235 * Clear the synch command doorbell to start on a clean slate.
237 if (!dev->msi_enabled)
238 src_writel(dev,
239 MUnit.ODR_C,
240 OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
243 * Disable doorbell interrupts
245 src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
248 * Force the completion of the mask register write before issuing
249 * the interrupt.
251 src_readl(dev, MUnit.OIMR);
254 * Signal that there is a new synch command
256 src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
258 if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
259 !dev->in_soft_reset) {
260 ok = 0;
261 start = jiffies;
263 if (command == IOP_RESET_ALWAYS) {
264 /* Wait up to 10 sec */
265 delay = 10*HZ;
266 } else {
267 /* Wait up to 5 minutes */
268 delay = 300*HZ;
270 while (time_before(jiffies, start+delay)) {
271 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
273 * Mon960 will set doorbell0 bit when it has completed the command.
275 if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
277 * Clear the doorbell.
279 if (dev->msi_enabled)
280 aac_src_access_devreg(dev,
281 AAC_CLEAR_SYNC_BIT);
282 else
283 src_writel(dev,
284 MUnit.ODR_C,
285 OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
286 ok = 1;
287 break;
290 * Yield the processor in case we are slow
292 msleep(1);
294 if (unlikely(ok != 1)) {
296 * Restore interrupt mask even though we timed out
298 aac_adapter_enable_int(dev);
299 return -ETIMEDOUT;
302 * Pull the synch status from Mailbox 0.
304 if (status)
305 *status = readl(&dev->IndexRegs->Mailbox[0]);
306 if (r1)
307 *r1 = readl(&dev->IndexRegs->Mailbox[1]);
308 if (r2)
309 *r2 = readl(&dev->IndexRegs->Mailbox[2]);
310 if (r3)
311 *r3 = readl(&dev->IndexRegs->Mailbox[3]);
312 if (r4)
313 *r4 = readl(&dev->IndexRegs->Mailbox[4]);
314 if (command == GET_COMM_PREFERRED_SETTINGS)
315 dev->max_msix =
316 readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
318 * Clear the synch command doorbell.
320 if (!dev->msi_enabled)
321 src_writel(dev,
322 MUnit.ODR_C,
323 OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
327 * Restore interrupt mask
329 aac_adapter_enable_int(dev);
330 return 0;
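/*
 * Illustrative sketch (not part of the driver): callers normally reach
 * this routine through the adapter ops table, for example:
 *
 *	u32 prop;
 *
 *	if (!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
 *				  0, 0, 0, 0, 0, 0,
 *				  &prop, NULL, NULL, NULL, NULL))
 *		pr_info("adapter properties: %x\n", prop);
 */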
334 * aac_src_interrupt_adapter - interrupt adapter
335 * @dev: Adapter
337 * Send an interrupt to the i960 and breakpoint it.
340 static void aac_src_interrupt_adapter(struct aac_dev *dev)
342 src_sync_cmd(dev, BREAKPOINT_REQUEST,
343 0, 0, 0, 0, 0, 0,
344 NULL, NULL, NULL, NULL, NULL);
348 * aac_src_notify_adapter - send an event to the adapter
349 * @dev: Adapter
350 * @event: Event to send
352 * Notify the i960 that something it probably cares about has
353 * happened.
356 static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
358 switch (event) {
360 case AdapNormCmdQue:
361 src_writel(dev, MUnit.ODR_C,
362 INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
363 break;
364 case HostNormRespNotFull:
365 src_writel(dev, MUnit.ODR_C,
366 INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
367 break;
368 case AdapNormRespQue:
369 src_writel(dev, MUnit.ODR_C,
370 INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
371 break;
372 case HostNormCmdNotFull:
373 src_writel(dev, MUnit.ODR_C,
374 INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
375 break;
376 case FastIo:
377 src_writel(dev, MUnit.ODR_C,
378 INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
379 break;
380 case AdapPrintfDone:
381 src_writel(dev, MUnit.ODR_C,
382 INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
383 break;
384 default:
385 BUG();
386 break;
391 * aac_src_start_adapter - activate adapter
392 * @dev: Adapter
394 * Start up processing on an i960 based AAC adapter
397 static void aac_src_start_adapter(struct aac_dev *dev)
399 union aac_init *init;
400 int i;
402 /* reset host_rrq_idx first */
403 for (i = 0; i < dev->max_msix; i++) {
404 dev->host_rrq_idx[i] = i * dev->vector_cap;
405 atomic_set(&dev->rrq_outstanding[i], 0);
407 atomic_set(&dev->msix_counter, 0);
408 dev->fibs_pushed_no = 0;
410 init = dev->init;
411 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
412 init->r8.host_elapsed_seconds =
413 cpu_to_le32(ktime_get_real_seconds());
414 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
415 lower_32_bits(dev->init_pa),
416 upper_32_bits(dev->init_pa),
417 sizeof(struct _r8) +
418 (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
419 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
420 } else {
421 init->r7.host_elapsed_seconds =
422 cpu_to_le32(ktime_get_real_seconds());
423 // We can only use a 32 bit address here
424 src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
425 (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
426 NULL, NULL, NULL, NULL, NULL);
432 * aac_src_check_health
433 * @dev: device to check if healthy
435 * Will attempt to determine if the specified adapter is alive and
436 * capable of handling requests, returning 0 if alive.
438 static int aac_src_check_health(struct aac_dev *dev)
440 u32 status = src_readl(dev, MUnit.OMR);
443 * Check to see if the board panic'd.
445 if (unlikely(status & KERNEL_PANIC))
446 goto err_blink;
449 * Check to see if the board failed any self tests.
451 if (unlikely(status & SELF_TEST_FAILED))
452 goto err_out;
455 * Check to see if the monitor panic'd.
457 if (unlikely(status & MONITOR_PANIC))
458 goto err_out;
461 * Wait for the adapter to be up and running.
463 if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
464 return -3;
466 * Everything is OK
468 return 0;
470 err_out:
471 return -1;
473 err_blink:
474 return (status >> 16) & 0xFF;
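/*
 * aac_get_vector() below picks an MSI-X vector round-robin: an atomic
 * counter is bumped per delivery and taken modulo max_msix.
 */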
477 static inline u32 aac_get_vector(struct aac_dev *dev)
479 return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
483 * aac_src_deliver_message
484 * @fib: fib to issue
486 * Will send a fib, returning 0 if successful.
488 static int aac_src_deliver_message(struct fib *fib)
490 struct aac_dev *dev = fib->dev;
491 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
492 u32 fibsize;
493 dma_addr_t address;
494 struct aac_fib_xporthdr *pFibX;
495 int native_hba;
496 #if !defined(writeq)
497 unsigned long flags;
498 #endif
500 u16 vector_no;
502 atomic_inc(&q->numpending);
504 native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
507 if (dev->msi_enabled && dev->max_msix > 1 &&
508 (native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
510 if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
511 && dev->sa_firmware)
512 vector_no = aac_get_vector(dev);
513 else
514 vector_no = fib->vector_no;
516 if (native_hba) {
517 if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
518 struct aac_hba_tm_req *tm_req;
520 tm_req = (struct aac_hba_tm_req *)
521 fib->hw_fib_va;
522 if (tm_req->iu_type ==
523 HBA_IU_TYPE_SCSI_TM_REQ) {
524 ((struct aac_hba_tm_req *)
525 fib->hw_fib_va)->reply_qid
526 = vector_no;
527 ((struct aac_hba_tm_req *)
528 fib->hw_fib_va)->request_id
529 += (vector_no << 16);
530 } else {
531 ((struct aac_hba_reset_req *)
532 fib->hw_fib_va)->reply_qid
533 = vector_no;
534 ((struct aac_hba_reset_req *)
535 fib->hw_fib_va)->request_id
536 += (vector_no << 16);
538 } else {
539 ((struct aac_hba_cmd_req *)
540 fib->hw_fib_va)->reply_qid
541 = vector_no;
542 ((struct aac_hba_cmd_req *)
543 fib->hw_fib_va)->request_id
544 += (vector_no << 16);
546 } else {
547 fib->hw_fib_va->header.Handle += (vector_no << 16);
549 } else {
550 vector_no = 0;
553 atomic_inc(&dev->rrq_outstanding[vector_no]);
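/*
 * The inbound queue doorbell encodes the request size in the low bits of
 * the FIB's DMA address: fibsize is the length in 128-byte units minus
 * one, capped at 31, and OR'ed into the address before it is written to
 * IQN_L (native HBA) or IQ_L.  Where writeq() is available the 64-bit
 * value is written in one go, otherwise two 32-bit writes are serialized
 * with iq_lock.
 */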
555 if (native_hba) {
556 address = fib->hw_fib_pa;
557 fibsize = (fib->hbacmd_size + 127) / 128 - 1;
558 if (fibsize > 31)
559 fibsize = 31;
560 address |= fibsize;
561 #if defined(writeq)
562 src_writeq(dev, MUnit.IQN_L, (u64)address);
563 #else
564 spin_lock_irqsave(&fib->dev->iq_lock, flags);
565 src_writel(dev, MUnit.IQN_H,
566 upper_32_bits(address) & 0xffffffff);
567 src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
568 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
569 #endif
570 } else {
571 if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
572 dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
573 /* Compute the fibsize field: size in 128-byte units, minus one */
574 fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
575 + 127) / 128 - 1;
576 /* New FIB header, 32-bit */
577 address = fib->hw_fib_pa;
578 fib->hw_fib_va->header.StructType = FIB_MAGIC2;
579 fib->hw_fib_va->header.SenderFibAddress =
580 cpu_to_le32((u32)address);
581 fib->hw_fib_va->header.u.TimeStamp = 0;
582 WARN_ON(upper_32_bits(address) != 0L);
583 } else {
584 /* Compute the fibsize field, including the transport header */
585 fibsize = (sizeof(struct aac_fib_xporthdr) +
586 le16_to_cpu(fib->hw_fib_va->header.Size)
587 + 127) / 128 - 1;
588 /* Fill XPORT header */
589 pFibX = (struct aac_fib_xporthdr *)
590 ((unsigned char *)fib->hw_fib_va -
591 sizeof(struct aac_fib_xporthdr));
592 pFibX->Handle = fib->hw_fib_va->header.Handle;
593 pFibX->HostAddress =
594 cpu_to_le64((u64)fib->hw_fib_pa);
595 pFibX->Size = cpu_to_le32(
596 le16_to_cpu(fib->hw_fib_va->header.Size));
597 address = fib->hw_fib_pa -
598 (u64)sizeof(struct aac_fib_xporthdr);
600 if (fibsize > 31)
601 fibsize = 31;
602 address |= fibsize;
604 #if defined(writeq)
605 src_writeq(dev, MUnit.IQ_L, (u64)address);
606 #else
607 spin_lock_irqsave(&fib->dev->iq_lock, flags);
608 src_writel(dev, MUnit.IQ_H,
609 upper_32_bits(address) & 0xffffffff);
610 src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
611 spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
612 #endif
614 return 0;
618 * aac_src_ioremap
619 * @size: mapping resize request
622 static int aac_src_ioremap(struct aac_dev *dev, u32 size)
624 if (!size) {
625 iounmap(dev->regs.src.bar1);
626 dev->regs.src.bar1 = NULL;
627 iounmap(dev->regs.src.bar0);
628 dev->base = dev->regs.src.bar0 = NULL;
629 return 0;
631 dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
632 AAC_MIN_SRC_BAR1_SIZE);
633 dev->base = NULL;
634 if (dev->regs.src.bar1 == NULL)
635 return -1;
636 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
637 if (dev->base == NULL) {
638 iounmap(dev->regs.src.bar1);
639 dev->regs.src.bar1 = NULL;
640 return -1;
642 dev->IndexRegs = &((struct src_registers __iomem *)
643 dev->base)->u.tupelo.IndexRegs;
644 return 0;
648 * aac_srcv_ioremap
649 * @size: mapping resize request
652 static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
654 if (!size) {
655 iounmap(dev->regs.src.bar0);
656 dev->base = dev->regs.src.bar0 = NULL;
657 return 0;
660 dev->regs.src.bar1 =
661 ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
662 dev->base = NULL;
663 if (dev->regs.src.bar1 == NULL)
664 return -1;
665 dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
666 if (dev->base == NULL) {
667 iounmap(dev->regs.src.bar1);
668 dev->regs.src.bar1 = NULL;
669 return -1;
671 dev->IndexRegs = &((struct src_registers __iomem *)
672 dev->base)->u.denali.IndexRegs;
673 return 0;
676 void aac_set_intx_mode(struct aac_dev *dev)
678 if (dev->msi_enabled) {
679 aac_src_access_devreg(dev, AAC_ENABLE_INTX);
680 dev->msi_enabled = 0;
681 msleep(5000); /* Delay 5 seconds */
685 static void aac_clear_omr(struct aac_dev *dev)
687 u32 omr_value = 0;
689 omr_value = src_readl(dev, MUnit.OMR);
692 * Check for PCI Errors or Kernel Panic
694 if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
695 omr_value = 0;
698 * Preserve MSIX Value if any
700 src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
701 src_readl(dev, MUnit.OMR);
704 static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
706 __le32 supported_options3;
708 if (!aac_fib_dump)
709 return;
711 supported_options3 = dev->supplement_adapter_info.supported_options3;
712 if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
713 return;
715 aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
716 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
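/*
 * aac_is_ctrl_up_and_running() polls OMR until KERNEL_UP_AND_RUNNING is
 * set, restarting the SOFT_RESET_TIME window whenever KERNEL_BOOTING is
 * seen and treating an all-ones read as status 0.
 */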
719 static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
721 bool ctrl_up = true;
722 unsigned long status, start;
723 bool is_up = false;
725 start = jiffies;
726 do {
727 schedule();
728 status = src_readl(dev, MUnit.OMR);
730 if (status == 0xffffffff)
731 status = 0;
733 if (status & KERNEL_BOOTING) {
734 start = jiffies;
735 continue;
738 if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
739 ctrl_up = false;
740 break;
743 is_up = status & KERNEL_UP_AND_RUNNING;
745 } while (!is_up);
747 return ctrl_up;
750 static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
752 aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
753 NULL, NULL, NULL, NULL);
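/*
 * IOP reset sequence: dump the firmware FIBs if supported, tell the
 * firmware a reset is coming (IOP_RESET_ALWAYS), drop back to INTx,
 * clear OMR, hit the IOP reset doorbell in IDR, then give the controller
 * five seconds before it is polled again.
 */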
756 static void aac_send_iop_reset(struct aac_dev *dev)
758 aac_dump_fw_fib_iop_reset(dev);
760 aac_notify_fw_of_iop_reset(dev);
762 aac_set_intx_mode(dev);
764 aac_clear_omr(dev);
766 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
768 msleep(5000);
771 static void aac_send_hardware_soft_reset(struct aac_dev *dev)
773 u_int32_t val;
775 aac_clear_omr(dev);
776 val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
777 val |= 0x01;
778 writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
779 msleep_interruptible(20000);
782 static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
784 bool is_ctrl_up;
785 int ret = 0;
787 if (bled < 0)
788 goto invalid_out;
790 if (bled)
791 dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
794 * When there is a BlinkLED, IOP_RESET has no effect
796 if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
797 reset_type &= ~HW_IOP_RESET;
799 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
801 dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
803 if (reset_type & HW_IOP_RESET) {
804 dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
805 aac_send_iop_reset(dev);
808 * Wait until the controller reports up and running, or time out
810 is_ctrl_up = aac_is_ctrl_up_and_running(dev);
811 if (!is_ctrl_up)
812 dev_err(&dev->pdev->dev, "IOP reset failed\n");
813 else {
814 dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
815 goto set_startup;
819 if (!dev->sa_firmware) {
820 dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
821 ret = -ENODEV;
822 goto out;
825 if (reset_type & HW_SOFT_RESET) {
826 dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
827 aac_send_hardware_soft_reset(dev);
828 dev->msi_enabled = 0;
830 is_ctrl_up = aac_is_ctrl_up_and_running(dev);
831 if (!is_ctrl_up) {
832 dev_err(&dev->pdev->dev, "SOFT reset failed\n");
833 ret = -ENODEV;
834 goto out;
835 } else
836 dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
839 set_startup:
840 if (startup_timeout < 300)
841 startup_timeout = 300;
843 out:
844 return ret;
846 invalid_out:
847 if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
848 ret = -ENODEV;
849 goto out;
853 * aac_src_select_comm - Select communications method
854 * @dev: Adapter
855 * @comm: communications method
857 static int aac_src_select_comm(struct aac_dev *dev, int comm)
859 switch (comm) {
860 case AAC_COMM_MESSAGE:
861 dev->a_ops.adapter_intr = aac_src_intr_message;
862 dev->a_ops.adapter_deliver = aac_src_deliver_message;
863 break;
864 default:
865 return 1;
867 return 0;
871 * aac_src_init - initialize a Cardinal Frey Bar card
872 * @dev: device to configure
876 int aac_src_init(struct aac_dev *dev)
878 unsigned long start;
879 unsigned long status;
880 int restart = 0;
881 int instance = dev->id;
882 const char *name = dev->name;
884 dev->a_ops.adapter_ioremap = aac_src_ioremap;
885 dev->a_ops.adapter_comm = aac_src_select_comm;
887 dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
888 if (aac_adapter_ioremap(dev, dev->base_size)) {
889 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
890 goto error_iounmap;
893 /* Failure to reset here is an option ... */
894 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
895 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
897 if (dev->init_reset) {
898 dev->init_reset = false;
899 if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
900 ++restart;
904 * Check to see if the board panic'd while booting.
906 status = src_readl(dev, MUnit.OMR);
907 if (status & KERNEL_PANIC) {
908 if (aac_src_restart_adapter(dev,
909 aac_src_check_health(dev), IOP_HWSOFT_RESET))
910 goto error_iounmap;
911 ++restart;
914 * Check to see if the board failed any self tests.
916 status = src_readl(dev, MUnit.OMR);
917 if (status & SELF_TEST_FAILED) {
918 printk(KERN_ERR "%s%d: adapter self-test failed.\n",
919 dev->name, instance);
920 goto error_iounmap;
923 * Check to see if the monitor panic'd while booting.
925 if (status & MONITOR_PANIC) {
926 printk(KERN_ERR "%s%d: adapter monitor panic.\n",
927 dev->name, instance);
928 goto error_iounmap;
930 start = jiffies;
932 * Wait for the adapter to be up and running. Wait up to 3 minutes
934 while (!((status = src_readl(dev, MUnit.OMR)) &
935 KERNEL_UP_AND_RUNNING)) {
936 if ((restart &&
937 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
938 time_after(jiffies, start+HZ*startup_timeout)) {
939 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
940 dev->name, instance, status);
941 goto error_iounmap;
943 if (!restart &&
944 ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
945 time_after(jiffies, start + HZ *
946 ((startup_timeout > 60)
947 ? (startup_timeout - 60)
948 : (startup_timeout / 2))))) {
949 if (likely(!aac_src_restart_adapter(dev,
950 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
951 start = jiffies;
952 ++restart;
954 msleep(1);
956 if (restart && aac_commit)
957 aac_commit = 1;
959 * Fill in the common function dispatch table.
961 dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
962 dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
963 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
964 dev->a_ops.adapter_notify = aac_src_notify_adapter;
965 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
966 dev->a_ops.adapter_check_health = aac_src_check_health;
967 dev->a_ops.adapter_restart = aac_src_restart_adapter;
968 dev->a_ops.adapter_start = aac_src_start_adapter;
971 * First clear out all interrupts. Then enable the ones that we
972 * can handle.
974 aac_adapter_comm(dev, AAC_COMM_MESSAGE);
975 aac_adapter_disable_int(dev);
976 src_writel(dev, MUnit.ODR_C, 0xffffffff);
977 aac_adapter_enable_int(dev);
979 if (aac_init_adapter(dev) == NULL)
980 goto error_iounmap;
981 if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
982 goto error_iounmap;
984 dev->msi = !pci_enable_msi(dev->pdev);
986 dev->aac_msix[0].vector_no = 0;
987 dev->aac_msix[0].dev = dev;
989 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
990 IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {
992 if (dev->msi)
993 pci_disable_msi(dev->pdev);
995 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
996 name, instance);
997 goto error_iounmap;
999 dev->dbg_base = pci_resource_start(dev->pdev, 2);
1000 dev->dbg_base_mapped = dev->regs.src.bar1;
1001 dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
1002 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1004 aac_adapter_enable_int(dev);
1006 if (!dev->sync_mode) {
1008 * Tell the adapter that all is configured, and it can
1009 * start accepting requests
1011 aac_src_start_adapter(dev);
1013 return 0;
1015 error_iounmap:
1017 return -1;
1020 static int aac_src_wait_sync(struct aac_dev *dev, int *status)
1022 unsigned long start = jiffies;
1023 unsigned long usecs = 0;
1024 int delay = 5 * HZ;
1025 int rc = 1;
1027 while (time_before(jiffies, start+delay)) {
1029 * Delay 5 microseconds to let Mon960 get info.
1031 udelay(5);
1034 * Mon960 will set doorbell0 bit when it has completed the
1035 * command.
1037 if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
1039 * Clear the doorbell.
1041 if (dev->msi_enabled)
1042 aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
1043 else
1044 src_writel(dev, MUnit.ODR_C,
1045 OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
1046 rc = 0;
1048 break;
1052 * Yield the processor in case we are slow
1054 usecs = 1 * USEC_PER_MSEC;
1055 usleep_range(usecs, usecs + 50);
1058 * Pull the synch status from Mailbox 0.
1060 if (status && !rc) {
1061 status[0] = readl(&dev->IndexRegs->Mailbox[0]);
1062 status[1] = readl(&dev->IndexRegs->Mailbox[1]);
1063 status[2] = readl(&dev->IndexRegs->Mailbox[2]);
1064 status[3] = readl(&dev->IndexRegs->Mailbox[3]);
1065 status[4] = readl(&dev->IndexRegs->Mailbox[4]);
1068 return rc;
1072 * aac_src_soft_reset - perform soft reset to speed up
1073 * access
1075 * Assumptions: That the controller is in a state where we can
1076 * bring it back to life with an init struct. We can only use
1077 * fast sync commands, as the timeout is 5 seconds.
1079 * @dev: device to configure
1083 static int aac_src_soft_reset(struct aac_dev *dev)
1085 u32 status_omr = src_readl(dev, MUnit.OMR);
1086 u32 status[5];
1087 int rc = 1;
1088 int state = 0;
1089 char *state_str[7] = {
1090 "GET_ADAPTER_PROPERTIES Failed",
1091 "GET_ADAPTER_PROPERTIES timeout",
1092 "SOFT_RESET not supported",
1093 "DROP_IO Failed",
1094 "DROP_IO timeout",
1095 "Check Health failed"
1098 if (status_omr == INVALID_OMR)
1099 return 1; // pcie hosed
1101 if (!(status_omr & KERNEL_UP_AND_RUNNING))
1102 return 1; // not up and running
1105 * We go into soft reset mode to allow us to handle response
1107 dev->in_soft_reset = 1;
1108 dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;
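/*
 * Negotiation: GET_ADAPTER_PROPERTIES must report AAC_OPT_EXTENDED and
 * AAC_EXTOPT_SOFT_RESET (and reveals sa_firmware), DROP_IO asks the
 * firmware to abandon outstanding I/O, and a final health check decides
 * whether the fast path is usable.  The state counter indexes state_str[]
 * for the error message on failure.
 */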
1110 /* Get adapter properties */
1111 rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
1112 0, 0, 0, status+0, status+1, status+2, status+3, status+4);
1113 if (rc)
1114 goto out;
1116 state++;
1117 if (aac_src_wait_sync(dev, status)) {
1118 rc = 1;
1119 goto out;
1122 state++;
1123 if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
1124 (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
1125 rc = 2;
1126 goto out;
1129 if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
1130 (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
1131 dev->sa_firmware = 1;
1133 state++;
1134 rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
1135 status+0, status+1, status+2, status+3, status+4);
1137 if (rc)
1138 goto out;
1140 state++;
1141 if (aac_src_wait_sync(dev, status)) {
1142 rc = 3;
1143 goto out;
1146 if (status[1])
1147 dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
1148 __func__, status[1]);
1150 state++;
1151 rc = aac_src_check_health(dev);
1153 out:
1154 dev->in_soft_reset = 0;
1155 dev->msi_enabled = 0;
1156 if (rc)
1157 dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
1158 state_str[state], rc);
1160 return rc;
1163 * aac_srcv_init - initialize an SRCv card
1164 * @dev: device to configure
1168 int aac_srcv_init(struct aac_dev *dev)
1170 unsigned long start;
1171 unsigned long status;
1172 int restart = 0;
1173 int instance = dev->id;
1174 const char *name = dev->name;
1176 dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
1177 dev->a_ops.adapter_comm = aac_src_select_comm;
1179 dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
1180 if (aac_adapter_ioremap(dev, dev->base_size)) {
1181 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
1182 goto error_iounmap;
1185 /* Failure to reset here is an option ... */
1186 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1187 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1189 if (dev->init_reset) {
1190 dev->init_reset = false;
1191 if (aac_src_soft_reset(dev)) {
1192 aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
1193 ++restart;
1198 * Check to see if flash update is running.
1199 * Wait for the adapter to be up and running. Wait up to 5 minutes
1201 status = src_readl(dev, MUnit.OMR);
1202 if (status & FLASH_UPD_PENDING) {
1203 start = jiffies;
1204 do {
1205 status = src_readl(dev, MUnit.OMR);
1206 if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
1207 printk(KERN_ERR "%s%d: adapter flash update failed.\n",
1208 dev->name, instance);
1209 goto error_iounmap;
1211 } while (!(status & FLASH_UPD_SUCCESS) &&
1212 !(status & FLASH_UPD_FAILED));
1213 /* Delay 10 seconds.
1214 * Because right now FW is doing a soft reset,
1215 * do not read scratch pad register at this time
1217 ssleep(10);
1220 * Check to see if the board panic'd while booting.
1222 status = src_readl(dev, MUnit.OMR);
1223 if (status & KERNEL_PANIC) {
1224 if (aac_src_restart_adapter(dev,
1225 aac_src_check_health(dev), IOP_HWSOFT_RESET))
1226 goto error_iounmap;
1227 ++restart;
1230 * Check to see if the board failed any self tests.
1232 status = src_readl(dev, MUnit.OMR);
1233 if (status & SELF_TEST_FAILED) {
1234 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
1235 goto error_iounmap;
1238 * Check to see if the monitor panic'd while booting.
1240 if (status & MONITOR_PANIC) {
1241 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
1242 goto error_iounmap;
1245 start = jiffies;
1247 * Wait for the adapter to be up and running. Wait up to 3 minutes
1249 do {
1250 status = src_readl(dev, MUnit.OMR);
1251 if (status == INVALID_OMR)
1252 status = 0;
1254 if ((restart &&
1255 (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
1256 time_after(jiffies, start+HZ*startup_timeout)) {
1257 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
1258 dev->name, instance, status);
1259 goto error_iounmap;
1261 if (!restart &&
1262 ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
1263 time_after(jiffies, start + HZ *
1264 ((startup_timeout > 60)
1265 ? (startup_timeout - 60)
1266 : (startup_timeout / 2))))) {
1267 if (likely(!aac_src_restart_adapter(dev,
1268 aac_src_check_health(dev), IOP_HWSOFT_RESET)))
1269 start = jiffies;
1270 ++restart;
1272 msleep(1);
1273 } while (!(status & KERNEL_UP_AND_RUNNING));
1275 if (restart && aac_commit)
1276 aac_commit = 1;
1278 * Fill in the common function dispatch table.
1280 dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
1281 dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
1282 dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
1283 dev->a_ops.adapter_notify = aac_src_notify_adapter;
1284 dev->a_ops.adapter_sync_cmd = src_sync_cmd;
1285 dev->a_ops.adapter_check_health = aac_src_check_health;
1286 dev->a_ops.adapter_restart = aac_src_restart_adapter;
1287 dev->a_ops.adapter_start = aac_src_start_adapter;
1290 * First clear out all interrupts. Then enable the ones that we
1291 * can handle.
1293 aac_adapter_comm(dev, AAC_COMM_MESSAGE);
1294 aac_adapter_disable_int(dev);
1295 src_writel(dev, MUnit.ODR_C, 0xffffffff);
1296 aac_adapter_enable_int(dev);
1298 if (aac_init_adapter(dev) == NULL)
1299 goto error_iounmap;
1300 if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
1301 (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
1302 goto error_iounmap;
1303 if (dev->msi_enabled)
1304 aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
1306 if (aac_acquire_irq(dev))
1307 goto error_iounmap;
1309 dev->dbg_base = pci_resource_start(dev->pdev, 2);
1310 dev->dbg_base_mapped = dev->regs.src.bar1;
1311 dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
1312 dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
1314 aac_adapter_enable_int(dev);
1316 if (!dev->sync_mode) {
1318 * Tell the adapter that all is configured, and it can
1319 * start accepting requests
1321 aac_src_start_adapter(dev);
1323 return 0;
1325 error_iounmap:
1327 return -1;
1330 void aac_src_access_devreg(struct aac_dev *dev, int mode)
1332 u_int32_t val;
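/*
 * IDR doubles as a control word here: bit 4 clears the sync doorbell,
 * bit 5 clears the AIF doorbell, bit 6 switches the firmware to MSI-X
 * and bit 7 back to INTx, while OIMR and IOAR carry the interrupt masks.
 */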
1334 switch (mode) {
1335 case AAC_ENABLE_INTERRUPT:
1336 src_writel(dev,
1337 MUnit.OIMR,
1338 dev->OIMR = (dev->msi_enabled ?
1339 AAC_INT_ENABLE_TYPE1_MSIX :
1340 AAC_INT_ENABLE_TYPE1_INTX));
1341 break;
1343 case AAC_DISABLE_INTERRUPT:
1344 src_writel(dev,
1345 MUnit.OIMR,
1346 dev->OIMR = AAC_INT_DISABLE_ALL);
1347 break;
1349 case AAC_ENABLE_MSIX:
1350 /* set bit 6 */
1351 val = src_readl(dev, MUnit.IDR);
1352 val |= 0x40;
1353 src_writel(dev, MUnit.IDR, val);
1354 src_readl(dev, MUnit.IDR);
1355 /* unmask int. */
1356 val = PMC_ALL_INTERRUPT_BITS;
1357 src_writel(dev, MUnit.IOAR, val);
1358 val = src_readl(dev, MUnit.OIMR);
1359 src_writel(dev,
1360 MUnit.OIMR,
1361 val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
1362 break;
1364 case AAC_DISABLE_MSIX:
1365 /* reset bit 6 */
1366 val = src_readl(dev, MUnit.IDR);
1367 val &= ~0x40;
1368 src_writel(dev, MUnit.IDR, val);
1369 src_readl(dev, MUnit.IDR);
1370 break;
1372 case AAC_CLEAR_AIF_BIT:
1373 /* set bit 5 */
1374 val = src_readl(dev, MUnit.IDR);
1375 val |= 0x20;
1376 src_writel(dev, MUnit.IDR, val);
1377 src_readl(dev, MUnit.IDR);
1378 break;
1380 case AAC_CLEAR_SYNC_BIT:
1381 /* set bit 4 */
1382 val = src_readl(dev, MUnit.IDR);
1383 val |= 0x10;
1384 src_writel(dev, MUnit.IDR, val);
1385 src_readl(dev, MUnit.IDR);
1386 break;
1388 case AAC_ENABLE_INTX:
1389 /* set bit 7 */
1390 val = src_readl(dev, MUnit.IDR);
1391 val |= 0x80;
1392 src_writel(dev, MUnit.IDR, val);
1393 src_readl(dev, MUnit.IDR);
1394 /* unmask int. */
1395 val = PMC_ALL_INTERRUPT_BITS;
1396 src_writel(dev, MUnit.IOAR, val);
1397 src_readl(dev, MUnit.IOAR);
1398 val = src_readl(dev, MUnit.OIMR);
1399 src_writel(dev, MUnit.OIMR,
1400 val & (~(PMC_GLOBAL_INT_BIT2)));
1401 break;
1403 default:
1404 break;
1408 static int aac_src_get_sync_status(struct aac_dev *dev)
1410 int msix_val = 0;
1411 int legacy_val = 0;
1413 msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
1415 if (!dev->msi_enabled) {
1417 * if Legacy int status indicates cmd is not complete
1418 * sample MSIx register to see if it indicates cmd complete,
1419 * if yes set the controller in MSIx mode and consider cmd
1420 * completed
1422 legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
1423 if (!(legacy_val & 1) && msix_val)
1424 dev->msi_enabled = 1;
1425 return legacy_val;
1428 return msix_val;