/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				up(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
				sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		aac_intr_normal(dev, 0, 2, 0, NULL);
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = (dev->host_rrq[index] & 0x7fffffff);
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
	}

	return IRQ_HANDLED;
}
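
/*
 * Rough sketch of the host RRQ partitioning assumed by the handler above:
 * each MSI-X vector owns a contiguous slice of dev->host_rrq that is
 * dev->vector_cap entries long, so vector n walks indices
 * [n * vector_cap, (n + 1) * vector_cap) and wraps back to the start of
 * its own slice.  For example, with vector_cap = 8, vector 2 cycles
 * through indices 16..23.  (Illustration only, derived from the index
 * arithmetic above.)
 */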

/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@ret: adapter status
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev, MUnit.ODR_C,
			OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev, MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev, MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
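
/*
 * Summary sketch of the synchronous command handshake above: the host
 * writes the opcode to Mailbox 0 and the parameters to Mailboxes 1-4,
 * rings inbound doorbell 0 through IDR, polls the sync status bit
 * (outbound doorbell 0) for up to 10 seconds for IOP_RESET_ALWAYS or
 * 5 minutes otherwise, and finally reads the status and return values
 * back out of Mailboxes 0-5.  (Descriptive note only.)
 */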

/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {
	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	dev->fibs_pushed_no = 0;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());

	/* We can only use a 32 bit address here */
	src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
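
/*
 * Note on the INIT_STRUCT_BASE_ADDRESS handshake above: the init
 * structure's physical address is passed through a single 32-bit mailbox
 * parameter, which is why dev->init_pa is truncated with the
 * (u32)(ulong) cast and must therefore live below 4 GB.  (Descriptive
 * note only.)
 */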

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		return (status >> 16) & 0xFF;

	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
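
/*
 * Return convention used by the restart paths below, as implemented
 * above: 0 means the adapter is up and running, a positive value is the
 * firmware panic code taken from OMR bits 16-23, and a negative value
 * means a failed self-test or an adapter that never came up.
 * (Descriptive note only.)
 */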

/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);

	atomic_inc(&q->numpending);

	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
	    dev->max_msix > 1) {
		u_int16_t vector_no, first_choice = 0xffff;

		vector_no = dev->fibs_pushed_no % dev->max_msix;
		do {
			vector_no += 1;
			if (vector_no == dev->max_msix)
				vector_no = 1;
			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
			    dev->vector_cap)
				break;
			if (0xffff == first_choice)
				first_choice = vector_no;
			else if (vector_no == first_choice)
				break;
		} while (1);
		if (vector_no == first_choice)
			vector_no = 0;
		atomic_inc(&dev->rrq_outstanding[vector_no]);
		if (dev->fibs_pushed_no == 0xffffffff)
			dev->fibs_pushed_no = 0;
		else
			dev->fibs_pushed_no++;
		fib->hw_fib_va->header.Handle += (vector_no << 16);
	}

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		/* Calculate the amount to the fibsize bits */
		fibsize = (hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;
		/* New FIB header, 32-bit */
		address = fib->hw_fib_pa;
		fib->hw_fib_va->header.StructType = FIB_MAGIC2;
		fib->hw_fib_va->header.SenderFibAddress = (u32)address;
		fib->hw_fib_va->header.u.TimeStamp = 0;
		BUG_ON(upper_32_bits(address) != 0L);
		address |= fibsize;
	} else {
		/* Calculate the amount to the fibsize bits */
		fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;

		/* Fill XPORT header */
		pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
		pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
		pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
		pFibX->Size = cpu_to_le32(hdr_size);

		/*
		 * The xport header has been 32-byte aligned for us so that fibsize
		 * can be masked out of this address by hardware. -- BenC
		 */
		address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
		if (address & (ALIGN32 - 1))
			return -EINVAL;
		address |= fibsize;
	}

	src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
	src_writel(dev, MUnit.IQ_L, address & 0xffffffff);

	return 0;
}
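
/*
 * Worked example of the fibsize encoding above, assuming ALIGN32 is 32:
 * a 512-byte FIB gives fibsize = (512 + 127) / 128 - 1 = 3, i.e. four
 * 128-byte blocks minus one.  Because the FIB (or its xport header) is
 * 32-byte aligned, the low five bits of the delivered address are free,
 * so the size code rides in those bits and is masked back out by the
 * hardware, as the comment above notes.  (Illustration only.)
 */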

/**
 *	aac_src_ioremap
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}
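
/*
 * Mapping layout assumed by aac_src_ioremap() above: bar0 (dev->base)
 * covers the main register window starting at dev->base_start, while
 * bar1 maps the first AAC_MIN_SRC_BAR1_SIZE bytes of PCI BAR 2, which
 * aac_src_init() later exposes as the debug region (dev->dbg_base).
 * (Descriptive note only.)
 */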

/**
 *	aac_srcv_ioremap
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var, reset_mask;

	if (bled >= 0) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
		if ((bled || (var != 0x00000001)) &&
		    !dev->doorbell_mask)
			return -EINVAL;
		else if (dev->doorbell_mask) {
			reset_mask = dev->doorbell_mask;
			bled = 0;
			var = 0x00000001;
		}

		if ((dev->pdev->device == PMC_DEVICE_S7 ||
		    dev->pdev->device == PMC_DEVICE_S8 ||
		    dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
			aac_src_access_devreg(dev, AAC_ENABLE_INTX);
			dev->msi_enabled = 0;
			msleep(5000); /* Delay 5 seconds */
		}

		if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_DOORBELL_RESET)) {
			src_writel(dev, MUnit.IDR, reset_mask);
			ssleep(45);
		} else {
			src_writel(dev, MUnit.IDR, 0x100);
			ssleep(45);
		}
	}

	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		return -ENODEV;

	if (startup_timeout < 300)
		startup_timeout = 300;

	return 0;
}

/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	aac_src_init	-	initialize a Cardinal Frey Bar card
 *	@dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

/**
 *	aac_srcv_init	-	initialize an SRCv card
 *	@dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	int i, j;
	const char *name = dev->name;
	int cpu;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/*
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING) ||
		status == 0xffffffff) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
		goto error_iounmap;
	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;

			if (request_irq(dev->msixentry[i].vector,
					dev->a_ops.adapter_intr,
					0, "aacraid",
					&(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
						name, instance, i);
				for (j = 0 ; j < i ; j++)
					free_irq(dev->msixentry[j].vector,
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				goto error_iounmap;
			}
			if (irq_set_affinity_hint(
			   dev->msixentry[i].vector,
			   get_cpu_mask(cpu))) {
				printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
						name, instance, cpu);
			}
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					name, instance);
			goto error_iounmap;
		}
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u32 val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev, MUnit.OIMR,
			dev->OIMR = (dev->msi_enabled ?
				AAC_INT_ENABLE_TYPE1_MSIX :
				AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev, MUnit.OIMR,
			dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
			val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}

static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int val;

	if (dev->msi_enabled)
		val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0;
	else
		val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;

	return val;
}