// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *		 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);
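
/*
 * aac_src_intr_message - interrupt handler for SRC based controllers.
 * Decodes the doorbell/MSI status into sync-command, AIF and normal
 * response work and dispatches each to the appropriate handler.
 */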
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		int send_it = 0;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib,
							   fiblink);
				list_del(entry);
				send_it = 1;
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (send_it) {
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
			}
		}
		if (!dev->msi_enabled)
			mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
	}

	return IRQ_HANDLED;
}

/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */

static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 */

static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@ret: adapter status
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */

static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev,
			   MUnit.ODR_C,
			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
	    !dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10*HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300*HZ;
		}
		while (time_before(jiffies, start+delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev,
						MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev,
				MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */

static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */

static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */

static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}
}

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		goto err_blink;

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		goto err_out;

	/*
	 *	Check to see if the monitor panic'd.
	 */
	if (unlikely(status & MONITOR_PANIC))
		goto err_out;

	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		goto err_out;

	/*
	 *	Everything is OK
	 */
	return 0;

err_out:
	return -1;

err_blink:
	return (status >> 16) & 0xFF;
}
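
/* Round-robin selection of the next MSI-X vector to post a command on. */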
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter) % dev->max_msix;
}

/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif

	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;

	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
						HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}

/**
 *	aac_src_ioremap
 *	@dev: device to ioremap
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
				     AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 *	aac_srcv_ioremap
 *	@dev: device to ioremap
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
		ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}
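
/* Switch the controller from MSI/MSI-X back to legacy INTx signalling. */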
void aac_set_intx_mode(struct aac_dev *dev)
{
	if (dev->msi_enabled) {
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
		dev->msi_enabled = 0;
		msleep(5000); /* Delay 5 seconds */
	}
}
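
/* Clear the outbound message register, preserving only the MSI-X mode bit. */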
static void aac_clear_omr(struct aac_dev *dev)
{
	u32 omr_value = 0;

	omr_value = src_readl(dev, MUnit.OMR);

	/*
	 * Check for PCI Errors or Kernel Panic
	 */
	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
		omr_value = 0;

	/*
	 * Preserve MSIX Value if any
	 */
	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
	src_readl(dev, MUnit.OMR);
}
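
/* When supported, ask the firmware to dump its FIBs before an IOP reset. */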
static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	supported_options3 = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
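
/* Poll OMR until the controller reports up-and-running, or the soft reset window expires. */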
static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}
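
/* Tell the firmware to drop outstanding I/O; only valid when soft reset is supported. */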
static void aac_src_drop_io(struct aac_dev *dev)
{
	if (!dev->soft_reset_support)
		return;

	aac_adapter_sync_cmd(dev, DROP_IO,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
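
/* Warn the firmware that an IOP reset is imminent, then drop outstanding I/O. */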
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
			NULL, NULL, NULL, NULL);
	aac_src_drop_io(dev);
}
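
/* Perform an IOP reset: dump firmware FIBs, notify the firmware, fall back to INTx and hit the reset doorbell. */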
static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);

	msleep(5000);
}
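
/* Trigger a hardware soft reset through the IBW_SWR doorbell and give the controller time to recover. */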
static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u32 val;

	aac_clear_omr(dev);
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}
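
/* Reset the controller with an IOP and/or hardware soft reset, as selected by reset_type. */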
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up = true;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has no effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Creates a delay or wait till up and running comes thru
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		return -ENODEV;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			return -ENODEV;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

	return 0;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		return -ENODEV;

	return 0;
}

/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *	aac_src_init	-	initialize a Cardinal Frey Bar card
 *	@dev: device to configure
 *
 */

int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;

			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}

	return rc;
}

/**
 *	aac_src_soft_reset	-	perform soft reset to speed up
 *	access
 *
 *	Assumptions: That the controller is in a state where we can
 *	bring it back to life with an init struct. We can only use
 *	fast sync commands, as the timeout is 5 seconds.
 *
 *	@dev: device to configure
 *
 */

static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;	// pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;	// not up and running

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 2;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4);

	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 3;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}

/**
 *	aac_srcv_init	-	initialize an SRCv card
 *	@dev: device to configure
 *
 */

int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		if (status == INVALID_OMR)
			status = 0;

		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
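
/* Central helper for driving the SRC interrupt control registers (OIMR/IDR/IOAR). */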
void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u32 val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
				   AAC_INT_ENABLE_TYPE1_MSIX :
				   AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
				val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}
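
/* Read the sync-command doorbell status, handling both legacy INTx and MSI-X modes. */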
static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int msix_val = 0;
	int legacy_val = 0;

	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;

	if (!dev->msi_enabled) {
		/*
		 * if Legacy int status indicates cmd is not complete
		 * sample MSIx register to see if it indicates cmd complete,
		 * if yes set the controller in MSIx mode and consider cmd
		 * completed
		 */
		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
		if (!(legacy_val & 1) && msix_val)
			dev->msi_enabled = 1;
		return legacy_val;
	}

	return msix_val;
}
= 1;