// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *		 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

static int aac_src_get_sync_status(struct aac_dev *dev);

static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_msix_ctx *ctx;
	struct aac_dev *dev;
	unsigned long bellbits, bellbits_shifted;
	int vector_no;
	int isFastResponse, mode;
	u32 index, handle;

	ctx = (struct aac_msix_ctx *)dev_id;
	dev = ctx->dev;
	vector_no = ctx->vector_no;

	if (dev->msi_enabled) {
		mode = AAC_INT_MODE_MSI;
		if (vector_no == 0) {
			bellbits = src_readl(dev, MUnit.ODR_MSI);
			if (bellbits & 0x40000)
				mode |= AAC_INT_MODE_AIF;
			if (bellbits & 0x1000)
				mode |= AAC_INT_MODE_SYNC;
		}
	} else {
		mode = AAC_INT_MODE_INTX;
		bellbits = src_readl(dev, MUnit.ODR_R);
		if (bellbits & PmDoorBellResponseSent) {
			bellbits = PmDoorBellResponseSent;
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		} else {
			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (bellbits_shifted & DoorBellAifPending)
				mode |= AAC_INT_MODE_AIF;
			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
				mode |= AAC_INT_MODE_SYNC;
		}
	}

	if (mode & AAC_INT_MODE_SYNC) {
		unsigned long sflags;
		struct list_head *entry;
		extern int aac_sync_mode;

		if (!aac_sync_mode && !dev->msi_enabled) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
		}

		if (dev->sync_fib) {
			if (dev->sync_fib->callback)
				dev->sync_fib->callback(dev->sync_fib->callback_data,
					dev->sync_fib);
			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
				dev->management_fib_count--;
				complete(&dev->sync_fib->event_wait);
			}
			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
						sflags);
			spin_lock_irqsave(&dev->sync_lock, sflags);
			if (!list_empty(&dev->sync_fib_list)) {
				entry = dev->sync_fib_list.next;
				dev->sync_fib = list_entry(entry,
							   struct fib, fiblink);
				list_del(entry);
			} else {
				dev->sync_fib = NULL;
			}
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			if (dev->sync_fib)
				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
					(u32)dev->sync_fib->hw_fib_pa,
					0, 0, 0, 0, 0,
					NULL, NULL, NULL, NULL, NULL);
		}
		if (!dev->msi_enabled)
			mode = 0;
	}

	if (mode & AAC_INT_MODE_AIF) {
		/* handle AIF */
		if (dev->sa_firmware) {
			u32 events = src_readl(dev, MUnit.SCR0);

			aac_intr_normal(dev, events, 1, 0, NULL);
			writel(events, &dev->IndexRegs->Mailbox[0]);
			src_writel(dev, MUnit.IDR, 1 << 23);
		} else {
			if (dev->aif_thread && dev->fsa_dev)
				aac_intr_normal(dev, 0, 2, 0, NULL);
		}
		if (dev->msi_enabled)
			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
		mode = 0;
	}

	if (mode) {
		index = dev->host_rrq_idx[vector_no];

		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu((dev->host_rrq[index])
				& 0x7fffffff);
			/* check fast response bits (30, 1) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;
			if (dev->msi_enabled && dev->max_msix > 1)
				atomic_dec(&dev->rrq_outstanding[vector_no]);
			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
			dev->host_rrq[index++] = 0;
			if (index == (vector_no + 1) * dev->vector_cap)
				index = vector_no * dev->vector_cap;
			dev->host_rrq_idx[vector_no] = index;
		}
	}

	return IRQ_HANDLED;
}

/**
 *  aac_src_disable_interrupt	-	Disable interrupts
 *  @dev: Adapter
 */
static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}

/**
 *  aac_src_enable_interrupt_message	-	Enable interrupts
 *  @dev: Adapter
 */
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
}

/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@p2: second parameter
 *	@p3: third parameter
 *	@p4: fourth parameter
 *	@p5: fifth parameter
 *	@p6: sixth parameter
 *	@status: adapter status
 *	@r1: first return value
 *	@r2: second return value
 *	@r3: third return value
 *	@r4: fourth return value
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */
static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status,
	u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	unsigned long delay;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 6
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	writel(p5, &dev->IndexRegs->Mailbox[5]);
	writel(p6, &dev->IndexRegs->Mailbox[6]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	if (!dev->msi_enabled)
		src_writel(dev, MUnit.ODR_C,
			OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
	    !dev->in_soft_reset) {
		ok = 0;
		start = jiffies;

		if (command == IOP_RESET_ALWAYS) {
			/* Wait up to 10 sec */
			delay = 10 * HZ;
		} else {
			/* Wait up to 5 minutes */
			delay = 300 * HZ;
		}
		while (time_before(jiffies, start + delay)) {
			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
			/*
			 *	Mon960 will set doorbell0 bit when it has
			 *	completed the command.
			 */
			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				if (dev->msi_enabled)
					aac_src_access_devreg(dev,
						AAC_CLEAR_SYNC_BIT);
				else
					src_writel(dev, MUnit.ODR_C,
						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}
		/*
		 *	Pull the synch status from Mailbox 0.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
		if (command == GET_COMM_PREFERRED_SETTINGS)
			dev->max_msix =
				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
		/*
		 *	Clear the synch command doorbell.
		 */
		if (!dev->msi_enabled)
			src_writel(dev, MUnit.ODR_C,
				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}

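/*
 * Illustrative usage sketch (editor's note, not from the original source):
 * callers normally reach src_sync_cmd() through the aac_adapter_sync_cmd()
 * dispatch wrapper, passing zeros for unused parameters and NULL for return
 * values they do not need, e.g. the FIB-dump request issued later in this
 * file:
 *
 *	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
 *			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
 *
 * The command and parameters land in Mailboxes 0-6, INBOUNDDOORBELL_0 kicks
 * the firmware, and status/return values are read back from Mailboxes 0-4.
 */
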
/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the i960 and breakpoint it.
 */
static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}

/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the i960 that something it probably cares about has
 *	happened.
 */
static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {
	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}

/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev:	Adapter
 *
 *	Start up processing on an i960 based AAC adapter
 */
static void aac_src_start_adapter(struct aac_dev *dev)
{
	union aac_init *init;
	int i;

	/* reset host_rrq_idx first */
	for (i = 0; i < dev->max_msix; i++) {
		dev->host_rrq_idx[i] = i * dev->vector_cap;
		atomic_set(&dev->rrq_outstanding[i], 0);
	}
	atomic_set(&dev->msix_counter, 0);
	dev->fibs_pushed_no = 0;

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
			sizeof(struct _r8) +
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
	}
}

/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		goto err_blink;

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		goto err_out;

	/*
	 *	Check to see if the monitor panic'd.
	 */
	if (unlikely(status & MONITOR_PANIC))
		goto err_out;

	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;

	/*
	 *	Everything is OK
	 */
	return 0;

err_out:
	return -1;

err_blink:
	return (status >> 16) & 0xFF;
}

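/*
 * Illustrative note (editor's addition, not from the original source):
 * a non-zero return is passed straight to aac_src_restart_adapter() by the
 * init paths below, e.g.
 *
 *	aac_src_restart_adapter(dev, aac_src_check_health(dev),
 *				IOP_HWSOFT_RESET);
 *
 * where it arrives as the "bled" (BlinkLED) argument; the value read back
 * from the upper OMR bits on a kernel panic carries that blink code.
 */
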
static inline u32 aac_get_vector(struct aac_dev *dev)
{
	return atomic_inc_return(&dev->msix_counter) % dev->max_msix;
}

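/*
 * Illustrative note (editor's addition, not from the original source):
 * msix_counter is a single per-adapter counter, so successive calls simply
 * cycle through the reply queues; with max_msix == 4 the returned vector
 * numbers go 1, 2, 3, 0, 1, 2, ... spreading completions across the MSI-X
 * vectors in round-robin fashion.
 */
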
/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	int native_hba;
#if !defined(writeq)
	unsigned long flags;
#endif
	u16 vector_no;

	atomic_inc(&q->numpending);

	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;

	if (dev->msi_enabled && dev->max_msix > 1 &&
		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {

		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
			&& dev->sa_firmware)
			vector_no = aac_get_vector(dev);
		else
			vector_no = fib->vector_no;

		if (native_hba) {
			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
				struct aac_hba_tm_req *tm_req;

				tm_req = (struct aac_hba_tm_req *)
						fib->hw_fib_va;
				if (tm_req->iu_type ==
					HBA_IU_TYPE_SCSI_TM_REQ) {
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_tm_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				} else {
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->reply_qid
							= vector_no;
					((struct aac_hba_reset_req *)
						fib->hw_fib_va)->request_id
							+= (vector_no << 16);
				}
			} else {
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->reply_qid
						= vector_no;
				((struct aac_hba_cmd_req *)
					fib->hw_fib_va)->request_id
						+= (vector_no << 16);
			}
		} else {
			fib->hw_fib_va->header.Handle += (vector_no << 16);
		}
	} else {
		vector_no = 0;
	}

	atomic_inc(&dev->rrq_outstanding[vector_no]);

	if (native_hba) {
		address = fib->hw_fib_pa;
		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;
#if defined(writeq)
		src_writeq(dev, MUnit.IQN_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQN_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	} else {
		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
			/* Calculate the amount to the fibsize bits */
			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* New FIB header, 32-bit */
			address = fib->hw_fib_pa;
			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
			fib->hw_fib_va->header.SenderFibAddress =
				cpu_to_le32((u32)address);
			fib->hw_fib_va->header.u.TimeStamp = 0;
			WARN_ON(upper_32_bits(address) != 0L);
		} else {
			/* Calculate the amount to the fibsize bits */
			fibsize = (sizeof(struct aac_fib_xporthdr) +
				le16_to_cpu(fib->hw_fib_va->header.Size)
				+ 127) / 128 - 1;
			/* Fill XPORT header */
			pFibX = (struct aac_fib_xporthdr *)
				((unsigned char *)fib->hw_fib_va -
				sizeof(struct aac_fib_xporthdr));
			pFibX->Handle = fib->hw_fib_va->header.Handle;
			pFibX->HostAddress =
				cpu_to_le64((u64)fib->hw_fib_pa);
			pFibX->Size = cpu_to_le32(
				le16_to_cpu(fib->hw_fib_va->header.Size));
			address = fib->hw_fib_pa -
				(u64)sizeof(struct aac_fib_xporthdr);
		}
		if (fibsize > 31)
			fibsize = 31;
		address |= fibsize;

#if defined(writeq)
		src_writeq(dev, MUnit.IQ_L, (u64)address);
#else
		spin_lock_irqsave(&fib->dev->iq_lock, flags);
		src_writel(dev, MUnit.IQ_H,
			upper_32_bits(address) & 0xffffffff);
		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
#endif
	}
	return 0;
}

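/*
 * Worked example (editor's addition, not from the original source): the
 * inbound-queue doorbell encodes the request size in the low bits of the
 * FIB address. For a 512-byte hardware FIB, (512 + 127) / 128 - 1 = 3,
 * i.e. "size in 128-byte blocks minus one", clamped to 31; the value is
 * OR'd into the address before it is written to MUnit.IQ_L / IQN_L above.
 */
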
/**
 *  aac_src_ioremap
 *	@dev: device ioremap
 *	@size: mapping resize request
 *
 */
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
				     AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}

/**
 *  aac_srcv_ioremap
 *	@dev: device ioremap
 *	@size: mapping resize request
 *
 */
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}

	dev->regs.src.bar1 =
		ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}

void aac_set_intx_mode(struct aac_dev *dev)
{
	if (dev->msi_enabled) {
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
		dev->msi_enabled = 0;
		msleep(5000); /* Delay 5 seconds */
	}
}

static void aac_clear_omr(struct aac_dev *dev)
{
	u32 omr_value = 0;

	omr_value = src_readl(dev, MUnit.OMR);

	/*
	 * Check for PCI Errors or Kernel Panic
	 */
	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
		omr_value = 0;

	/*
	 * Preserve MSIX Value if any
	 */
	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
	src_readl(dev, MUnit.OMR);
}

static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
{
	__le32 supported_options3;

	supported_options3 = dev->supplement_adapter_info.supported_options3;
	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
		return;

	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
	bool ctrl_up = true;
	unsigned long status, start;
	bool is_up = false;

	start = jiffies;
	do {
		schedule();
		status = src_readl(dev, MUnit.OMR);

		if (status == 0xffffffff)
			status = 0;

		if (status & KERNEL_BOOTING) {
			start = jiffies;
			continue;
		}

		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
			ctrl_up = false;
			break;
		}

		is_up = status & KERNEL_UP_AND_RUNNING;

	} while (!is_up);

	return ctrl_up;
}

static void aac_src_drop_io(struct aac_dev *dev)
{
	if (!dev->soft_reset_support)
		return;

	aac_adapter_sync_cmd(dev, DROP_IO,
			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
			NULL, NULL, NULL, NULL);
	aac_src_drop_io(dev);
}

static void aac_send_iop_reset(struct aac_dev *dev)
{
	aac_dump_fw_fib_iop_reset(dev);

	aac_notify_fw_of_iop_reset(dev);

	aac_set_intx_mode(dev);

	aac_clear_omr(dev);

	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
}

static void aac_send_hardware_soft_reset(struct aac_dev *dev)
{
	u32 val;

	aac_clear_omr(dev);
	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
	val |= 0x01;
	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
	msleep_interruptible(20000);
}

static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
	bool is_ctrl_up;
	int ret = 0;

	if (bled < 0)
		goto invalid_out;

	if (bled)
		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

	/*
	 * When there is a BlinkLED, IOP_RESET has no effect
	 */
	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
		reset_type &= ~HW_IOP_RESET;

	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);

	if (reset_type & HW_IOP_RESET) {
		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
		aac_send_iop_reset(dev);

		/*
		 * Creates a delay or wait till up and running comes thru
		 */
		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up)
			dev_err(&dev->pdev->dev, "IOP reset failed\n");
		else {
			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
			goto set_startup;
		}
	}

	if (!dev->sa_firmware) {
		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
		ret = -ENODEV;
		goto out;
	}

	if (reset_type & HW_SOFT_RESET) {
		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
		aac_send_hardware_soft_reset(dev);
		dev->msi_enabled = 0;

		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
		if (!is_ctrl_up) {
			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
			ret = -ENODEV;
			goto out;
		} else
			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
	}

set_startup:
	if (startup_timeout < 300)
		startup_timeout = 300;

out:
	return ret;

invalid_out:
	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		ret = -ENODEV;
	goto out;
}

/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */
static int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 *  aac_src_init	-	initialize a Cardinal Frey Bar card
 *  @dev: device to configure
 *
 */
int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
			++restart;
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}

	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = !pci_enable_msi(dev->pdev);

	dev->aac_msix[0].vector_no = 0;
	dev->aac_msix[0].dev = dev;

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

static int aac_src_wait_sync(struct aac_dev *dev, int *status)
{
	unsigned long start = jiffies;
	unsigned long usecs = 0;
	int delay = 5 * HZ;
	int rc = 1;

	while (time_before(jiffies, start+delay)) {
		/*
		 * Delay 5 microseconds to let Mon960 get info.
		 */
		udelay(5);

		/*
		 * Mon960 will set doorbell0 bit when it has completed the
		 * command.
		 */
		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			if (dev->msi_enabled)
				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
			else
				src_writel(dev, MUnit.ODR_C,
					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
			rc = 0;
			break;
		}

		/*
		 * Yield the processor in case we are slow
		 */
		usecs = 1 * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status && !rc) {
		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
	}
	return rc;
}

/**
 *  aac_src_soft_reset	-	perform soft reset to speed up access
 *
 *  Assumptions: That the controller is in a state where we can
 *  bring it back to life with an init struct. We can only use
 *  fast sync commands, as the timeout is 5 seconds.
 *
 *  @dev: device to configure
 *
 */
static int aac_src_soft_reset(struct aac_dev *dev)
{
	u32 status_omr = src_readl(dev, MUnit.OMR);
	u32 status[5];
	int rc = 1;
	int state = 0;
	char *state_str[7] = {
		"GET_ADAPTER_PROPERTIES Failed",
		"GET_ADAPTER_PROPERTIES timeout",
		"SOFT_RESET not supported",
		"DROP_IO Failed",
		"DROP_IO timeout",
		"Check Health failed"
	};

	if (status_omr == INVALID_OMR)
		return 1;	// pcie hosed

	if (!(status_omr & KERNEL_UP_AND_RUNNING))
		return 1;	// not up and running

	/*
	 * We go into soft reset mode to allow us to handle response
	 */
	dev->in_soft_reset = 1;
	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;

	/* Get adapter properties */
	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	state++;
	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
		rc = 1;
		goto out;
	}

	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
		dev->sa_firmware = 1;

	state++;
	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, status+4);
	if (rc)
		goto out;

	state++;
	if (aac_src_wait_sync(dev, status)) {
		rc = 1;
		goto out;
	}

	if (status[1])
		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
			__func__, status[1]);

	state++;
	rc = aac_src_check_health(dev);

out:
	dev->in_soft_reset = 0;
	dev->msi_enabled = 0;
	if (rc)
		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
			state_str[state], rc);

	return rc;
}

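/*
 * Illustrative note (editor's addition, not from the original source):
 * aac_srcv_init() below uses this as an optional fast path, e.g.
 *
 *	if (aac_src_soft_reset(dev))
 *		aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
 *
 * i.e. a failed soft reset simply falls back to the heavier IOP/hardware
 * reset path.
 */
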
/**
 *  aac_srcv_init	-	initialize an SRCv card
 *  @dev: device to configure
 *
 */
int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

	if (dev->init_reset) {
		dev->init_reset = false;
		if (aac_src_soft_reset(dev)) {
			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
			++restart;
		}
	}

	/*
	 *	Check to see if flash update is running.
	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/* Delay 10 seconds.
		 * Because right now FW is doing a soft reset,
		 * do not read scratch pad register at this time
		 */
		ssleep(10);
	}
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev,
			aac_src_check_health(dev), IOP_HWSOFT_RESET))
			goto error_iounmap;
		++restart;
	}
	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}

	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	do {
		status = src_readl(dev, MUnit.OMR);
		if (status == INVALID_OMR)
			status = 0;

		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev), IOP_HWSOFT_RESET)))
				start = jiffies;
			++restart;
		}
		msleep(1);
	} while (!(status & KERNEL_UP_AND_RUNNING));

	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;
	dev->a_ops.adapter_start = aac_src_start_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the ones that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
		goto error_iounmap;

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}

void aac_src_access_devreg(struct aac_dev *dev, int mode)
{
	u32 val;

	switch (mode) {
	case AAC_ENABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = (dev->msi_enabled ?
					AAC_INT_ENABLE_TYPE1_MSIX :
					AAC_INT_ENABLE_TYPE1_INTX));
		break;

	case AAC_DISABLE_INTERRUPT:
		src_writel(dev,
			   MUnit.OIMR,
			   dev->OIMR = AAC_INT_DISABLE_ALL);
		break;

	case AAC_ENABLE_MSIX:
		/* set bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev,
			   MUnit.OIMR,
			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
		break;

	case AAC_DISABLE_MSIX:
		/* reset bit 6 */
		val = src_readl(dev, MUnit.IDR);
		val &= ~0x40;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_AIF_BIT:
		/* set bit 5 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x20;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_CLEAR_SYNC_BIT:
		/* set bit 4 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x10;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		break;

	case AAC_ENABLE_INTX:
		/* set bit 7 */
		val = src_readl(dev, MUnit.IDR);
		val |= 0x80;
		src_writel(dev, MUnit.IDR, val);
		src_readl(dev, MUnit.IDR);
		/* unmask int. */
		val = PMC_ALL_INTERRUPT_BITS;
		src_writel(dev, MUnit.IOAR, val);
		src_readl(dev, MUnit.IOAR);
		val = src_readl(dev, MUnit.OIMR);
		src_writel(dev, MUnit.OIMR,
				val & (~(PMC_GLOBAL_INT_BIT2)));
		break;

	default:
		break;
	}
}

static int aac_src_get_sync_status(struct aac_dev *dev)
{
	int msix_val = 0;
	int legacy_val = 0;

	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;

	if (!dev->msi_enabled) {
		/*
		 * if Legacy int status indicates cmd is not complete,
		 * sample MSI-x register to see if it indicates cmd complete;
		 * if yes, set the controller in MSI-x mode and consider the
		 * cmd completed
		 */
		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
		if (!(legacy_val & 1) && msix_val)
			dev->msi_enabled = 1;
		else /* return only the legacy value in INTx mode */
			return legacy_val;
	}

	return msix_val;
}