// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *	      2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *	      2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * Abstract: Hardware Device Interface for PMC SRC based controllers
 */
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/spinlock.h>
24 #include <linux/slab.h>
25 #include <linux/blkdev.h>
26 #include <linux/delay.h>
27 #include <linux/completion.h>
28 #include <linux/time.h>
29 #include <linux/interrupt.h>
30 #include <scsi/scsi_host.h>
34 static int aac_src_get_sync_status(struct aac_dev
*dev
);
36 static irqreturn_t
aac_src_intr_message(int irq
, void *dev_id
)
38 struct aac_msix_ctx
*ctx
;
40 unsigned long bellbits
, bellbits_shifted
;
42 int isFastResponse
, mode
;
45 ctx
= (struct aac_msix_ctx
*)dev_id
;
47 vector_no
= ctx
->vector_no
;
49 if (dev
->msi_enabled
) {
50 mode
= AAC_INT_MODE_MSI
;
52 bellbits
= src_readl(dev
, MUnit
.ODR_MSI
);
53 if (bellbits
& 0x40000)
54 mode
|= AAC_INT_MODE_AIF
;
55 if (bellbits
& 0x1000)
56 mode
|= AAC_INT_MODE_SYNC
;
59 mode
= AAC_INT_MODE_INTX
;
60 bellbits
= src_readl(dev
, MUnit
.ODR_R
);
61 if (bellbits
& PmDoorBellResponseSent
) {
62 bellbits
= PmDoorBellResponseSent
;
63 src_writel(dev
, MUnit
.ODR_C
, bellbits
);
64 src_readl(dev
, MUnit
.ODR_C
);
66 bellbits_shifted
= (bellbits
>> SRC_ODR_SHIFT
);
67 src_writel(dev
, MUnit
.ODR_C
, bellbits
);
68 src_readl(dev
, MUnit
.ODR_C
);
70 if (bellbits_shifted
& DoorBellAifPending
)
71 mode
|= AAC_INT_MODE_AIF
;
72 else if (bellbits_shifted
& OUTBOUNDDOORBELL_0
)
73 mode
|= AAC_INT_MODE_SYNC
;
77 if (mode
& AAC_INT_MODE_SYNC
) {
79 struct list_head
*entry
;
81 extern int aac_sync_mode
;
83 if (!aac_sync_mode
&& !dev
->msi_enabled
) {
84 src_writel(dev
, MUnit
.ODR_C
, bellbits
);
85 src_readl(dev
, MUnit
.ODR_C
);
89 if (dev
->sync_fib
->callback
)
90 dev
->sync_fib
->callback(dev
->sync_fib
->callback_data
,
92 spin_lock_irqsave(&dev
->sync_fib
->event_lock
, sflags
);
93 if (dev
->sync_fib
->flags
& FIB_CONTEXT_FLAG_WAIT
) {
94 dev
->management_fib_count
--;
95 complete(&dev
->sync_fib
->event_wait
);
97 spin_unlock_irqrestore(&dev
->sync_fib
->event_lock
,
99 spin_lock_irqsave(&dev
->sync_lock
, sflags
);
100 if (!list_empty(&dev
->sync_fib_list
)) {
101 entry
= dev
->sync_fib_list
.next
;
102 dev
->sync_fib
= list_entry(entry
,
108 dev
->sync_fib
= NULL
;
110 spin_unlock_irqrestore(&dev
->sync_lock
, sflags
);
112 aac_adapter_sync_cmd(dev
, SEND_SYNCHRONOUS_FIB
,
113 (u32
)dev
->sync_fib
->hw_fib_pa
,
115 NULL
, NULL
, NULL
, NULL
, NULL
);
118 if (!dev
->msi_enabled
)
123 if (mode
& AAC_INT_MODE_AIF
) {
125 if (dev
->sa_firmware
) {
126 u32 events
= src_readl(dev
, MUnit
.SCR0
);
128 aac_intr_normal(dev
, events
, 1, 0, NULL
);
129 writel(events
, &dev
->IndexRegs
->Mailbox
[0]);
130 src_writel(dev
, MUnit
.IDR
, 1 << 23);
132 if (dev
->aif_thread
&& dev
->fsa_dev
)
133 aac_intr_normal(dev
, 0, 2, 0, NULL
);
135 if (dev
->msi_enabled
)
136 aac_src_access_devreg(dev
, AAC_CLEAR_AIF_BIT
);
141 index
= dev
->host_rrq_idx
[vector_no
];
145 /* remove toggle bit (31) */
146 handle
= le32_to_cpu((dev
->host_rrq
[index
])
148 /* check fast response bits (30, 1) */
149 if (handle
& 0x40000000)
151 handle
&= 0x0000ffff;
155 if (dev
->msi_enabled
&& dev
->max_msix
> 1)
156 atomic_dec(&dev
->rrq_outstanding
[vector_no
]);
157 aac_intr_normal(dev
, handle
, 0, isFastResponse
, NULL
);
158 dev
->host_rrq
[index
++] = 0;
159 if (index
== (vector_no
+ 1) * dev
->vector_cap
)
160 index
= vector_no
* dev
->vector_cap
;
161 dev
->host_rrq_idx
[vector_no
] = index
;
170 * aac_src_disable_interrupt - Disable interrupts
174 static void aac_src_disable_interrupt(struct aac_dev
*dev
)
176 src_writel(dev
, MUnit
.OIMR
, dev
->OIMR
= 0xffffffff);
180 * aac_src_enable_interrupt_message - Enable interrupts
184 static void aac_src_enable_interrupt_message(struct aac_dev
*dev
)
186 aac_src_access_devreg(dev
, AAC_ENABLE_INTERRUPT
);
190 * src_sync_cmd - send a command and wait
192 * @command: Command to execute
193 * @p1: first parameter
194 * @ret: adapter status
196 * This routine will send a synchronous command to the adapter and wait
197 * for its completion.
200 static int src_sync_cmd(struct aac_dev
*dev
, u32 command
,
201 u32 p1
, u32 p2
, u32 p3
, u32 p4
, u32 p5
, u32 p6
,
202 u32
*status
, u32
* r1
, u32
* r2
, u32
* r3
, u32
* r4
)
209 * Write the command into Mailbox 0
211 writel(command
, &dev
->IndexRegs
->Mailbox
[0]);
213 * Write the parameters into Mailboxes 1 - 6
215 writel(p1
, &dev
->IndexRegs
->Mailbox
[1]);
216 writel(p2
, &dev
->IndexRegs
->Mailbox
[2]);
217 writel(p3
, &dev
->IndexRegs
->Mailbox
[3]);
218 writel(p4
, &dev
->IndexRegs
->Mailbox
[4]);
221 * Clear the synch command doorbell to start on a clean slate.
223 if (!dev
->msi_enabled
)
226 OUTBOUNDDOORBELL_0
<< SRC_ODR_SHIFT
);
229 * Disable doorbell interrupts
231 src_writel(dev
, MUnit
.OIMR
, dev
->OIMR
= 0xffffffff);
234 * Force the completion of the mask register write before issuing
237 src_readl(dev
, MUnit
.OIMR
);
240 * Signal that there is a new synch command
242 src_writel(dev
, MUnit
.IDR
, INBOUNDDOORBELL_0
<< SRC_IDR_SHIFT
);
244 if ((!dev
->sync_mode
|| command
!= SEND_SYNCHRONOUS_FIB
) &&
245 !dev
->in_soft_reset
) {
249 if (command
== IOP_RESET_ALWAYS
) {
250 /* Wait up to 10 sec */
253 /* Wait up to 5 minutes */
256 while (time_before(jiffies
, start
+delay
)) {
257 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
259 * Mon960 will set doorbell0 bit when it has completed the command.
261 if (aac_src_get_sync_status(dev
) & OUTBOUNDDOORBELL_0
) {
263 * Clear the doorbell.
265 if (dev
->msi_enabled
)
266 aac_src_access_devreg(dev
,
271 OUTBOUNDDOORBELL_0
<< SRC_ODR_SHIFT
);
276 * Yield the processor in case we are slow
280 if (unlikely(ok
!= 1)) {
282 * Restore interrupt mask even though we timed out
284 aac_adapter_enable_int(dev
);
288 * Pull the synch status from Mailbox 0.
291 *status
= readl(&dev
->IndexRegs
->Mailbox
[0]);
293 *r1
= readl(&dev
->IndexRegs
->Mailbox
[1]);
295 *r2
= readl(&dev
->IndexRegs
->Mailbox
[2]);
297 *r3
= readl(&dev
->IndexRegs
->Mailbox
[3]);
299 *r4
= readl(&dev
->IndexRegs
->Mailbox
[4]);
300 if (command
== GET_COMM_PREFERRED_SETTINGS
)
302 readl(&dev
->IndexRegs
->Mailbox
[5]) & 0xFFFF;
304 * Clear the synch command doorbell.
306 if (!dev
->msi_enabled
)
309 OUTBOUNDDOORBELL_0
<< SRC_ODR_SHIFT
);
313 * Restore interrupt mask
315 aac_adapter_enable_int(dev
);
320 * aac_src_interrupt_adapter - interrupt adapter
323 * Send an interrupt to the i960 and breakpoint it.
326 static void aac_src_interrupt_adapter(struct aac_dev
*dev
)
328 src_sync_cmd(dev
, BREAKPOINT_REQUEST
,
330 NULL
, NULL
, NULL
, NULL
, NULL
);
334 * aac_src_notify_adapter - send an event to the adapter
336 * @event: Event to send
338 * Notify the i960 that something it probably cares about has
342 static void aac_src_notify_adapter(struct aac_dev
*dev
, u32 event
)
347 src_writel(dev
, MUnit
.ODR_C
,
348 INBOUNDDOORBELL_1
<< SRC_ODR_SHIFT
);
350 case HostNormRespNotFull
:
351 src_writel(dev
, MUnit
.ODR_C
,
352 INBOUNDDOORBELL_4
<< SRC_ODR_SHIFT
);
354 case AdapNormRespQue
:
355 src_writel(dev
, MUnit
.ODR_C
,
356 INBOUNDDOORBELL_2
<< SRC_ODR_SHIFT
);
358 case HostNormCmdNotFull
:
359 src_writel(dev
, MUnit
.ODR_C
,
360 INBOUNDDOORBELL_3
<< SRC_ODR_SHIFT
);
363 src_writel(dev
, MUnit
.ODR_C
,
364 INBOUNDDOORBELL_6
<< SRC_ODR_SHIFT
);
367 src_writel(dev
, MUnit
.ODR_C
,
368 INBOUNDDOORBELL_5
<< SRC_ODR_SHIFT
);
377 * aac_src_start_adapter - activate adapter
380 * Start up processing on an i960 based AAC adapter
383 static void aac_src_start_adapter(struct aac_dev
*dev
)
385 union aac_init
*init
;
388 /* reset host_rrq_idx first */
389 for (i
= 0; i
< dev
->max_msix
; i
++) {
390 dev
->host_rrq_idx
[i
] = i
* dev
->vector_cap
;
391 atomic_set(&dev
->rrq_outstanding
[i
], 0);
393 atomic_set(&dev
->msix_counter
, 0);
394 dev
->fibs_pushed_no
= 0;
397 if (dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE3
) {
398 init
->r8
.host_elapsed_seconds
=
399 cpu_to_le32(ktime_get_real_seconds());
400 src_sync_cmd(dev
, INIT_STRUCT_BASE_ADDRESS
,
401 lower_32_bits(dev
->init_pa
),
402 upper_32_bits(dev
->init_pa
),
404 (AAC_MAX_HRRQ
- 1) * sizeof(struct _rrq
),
405 0, 0, 0, NULL
, NULL
, NULL
, NULL
, NULL
);
407 init
->r7
.host_elapsed_seconds
=
408 cpu_to_le32(ktime_get_real_seconds());
409 // We can only use a 32 bit address here
410 src_sync_cmd(dev
, INIT_STRUCT_BASE_ADDRESS
,
411 (u32
)(ulong
)dev
->init_pa
, 0, 0, 0, 0, 0,
412 NULL
, NULL
, NULL
, NULL
, NULL
);
418 * aac_src_check_health
419 * @dev: device to check if healthy
421 * Will attempt to determine if the specified adapter is alive and
422 * capable of handling requests, returning 0 if alive.
424 static int aac_src_check_health(struct aac_dev
*dev
)
426 u32 status
= src_readl(dev
, MUnit
.OMR
);
429 * Check to see if the board panic'd.
431 if (unlikely(status
& KERNEL_PANIC
))
435 * Check to see if the board failed any self tests.
437 if (unlikely(status
& SELF_TEST_FAILED
))
441 * Check to see if the board failed any self tests.
443 if (unlikely(status
& MONITOR_PANIC
))
447 * Wait for the adapter to be up and running.
449 if (unlikely(!(status
& KERNEL_UP_AND_RUNNING
)))
460 return (status
>> 16) & 0xFF;
463 static inline u32
aac_get_vector(struct aac_dev
*dev
)
465 return atomic_inc_return(&dev
->msix_counter
)%dev
->max_msix
;
469 * aac_src_deliver_message
472 * Will send a fib, returning 0 if successful.
474 static int aac_src_deliver_message(struct fib
*fib
)
476 struct aac_dev
*dev
= fib
->dev
;
477 struct aac_queue
*q
= &dev
->queues
->queue
[AdapNormCmdQueue
];
480 struct aac_fib_xporthdr
*pFibX
;
488 atomic_inc(&q
->numpending
);
490 native_hba
= (fib
->flags
& FIB_CONTEXT_FLAG_NATIVE_HBA
) ? 1 : 0;
493 if (dev
->msi_enabled
&& dev
->max_msix
> 1 &&
494 (native_hba
|| fib
->hw_fib_va
->header
.Command
!= AifRequest
)) {
496 if ((dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE3
)
498 vector_no
= aac_get_vector(dev
);
500 vector_no
= fib
->vector_no
;
503 if (fib
->flags
& FIB_CONTEXT_FLAG_NATIVE_HBA_TMF
) {
504 struct aac_hba_tm_req
*tm_req
;
506 tm_req
= (struct aac_hba_tm_req
*)
508 if (tm_req
->iu_type
==
509 HBA_IU_TYPE_SCSI_TM_REQ
) {
510 ((struct aac_hba_tm_req
*)
511 fib
->hw_fib_va
)->reply_qid
513 ((struct aac_hba_tm_req
*)
514 fib
->hw_fib_va
)->request_id
515 += (vector_no
<< 16);
517 ((struct aac_hba_reset_req
*)
518 fib
->hw_fib_va
)->reply_qid
520 ((struct aac_hba_reset_req
*)
521 fib
->hw_fib_va
)->request_id
522 += (vector_no
<< 16);
525 ((struct aac_hba_cmd_req
*)
526 fib
->hw_fib_va
)->reply_qid
528 ((struct aac_hba_cmd_req
*)
529 fib
->hw_fib_va
)->request_id
530 += (vector_no
<< 16);
533 fib
->hw_fib_va
->header
.Handle
+= (vector_no
<< 16);
539 atomic_inc(&dev
->rrq_outstanding
[vector_no
]);
542 address
= fib
->hw_fib_pa
;
543 fibsize
= (fib
->hbacmd_size
+ 127) / 128 - 1;
548 src_writeq(dev
, MUnit
.IQN_L
, (u64
)address
);
550 spin_lock_irqsave(&fib
->dev
->iq_lock
, flags
);
551 src_writel(dev
, MUnit
.IQN_H
,
552 upper_32_bits(address
) & 0xffffffff);
553 src_writel(dev
, MUnit
.IQN_L
, address
& 0xffffffff);
554 spin_unlock_irqrestore(&fib
->dev
->iq_lock
, flags
);
557 if (dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE2
||
558 dev
->comm_interface
== AAC_COMM_MESSAGE_TYPE3
) {
559 /* Calculate the amount to the fibsize bits */
560 fibsize
= (le16_to_cpu(fib
->hw_fib_va
->header
.Size
)
562 /* New FIB header, 32-bit */
563 address
= fib
->hw_fib_pa
;
564 fib
->hw_fib_va
->header
.StructType
= FIB_MAGIC2
;
565 fib
->hw_fib_va
->header
.SenderFibAddress
=
566 cpu_to_le32((u32
)address
);
567 fib
->hw_fib_va
->header
.u
.TimeStamp
= 0;
568 WARN_ON(upper_32_bits(address
) != 0L);
570 /* Calculate the amount to the fibsize bits */
571 fibsize
= (sizeof(struct aac_fib_xporthdr
) +
572 le16_to_cpu(fib
->hw_fib_va
->header
.Size
)
574 /* Fill XPORT header */
575 pFibX
= (struct aac_fib_xporthdr
*)
576 ((unsigned char *)fib
->hw_fib_va
-
577 sizeof(struct aac_fib_xporthdr
));
578 pFibX
->Handle
= fib
->hw_fib_va
->header
.Handle
;
580 cpu_to_le64((u64
)fib
->hw_fib_pa
);
581 pFibX
->Size
= cpu_to_le32(
582 le16_to_cpu(fib
->hw_fib_va
->header
.Size
));
583 address
= fib
->hw_fib_pa
-
584 (u64
)sizeof(struct aac_fib_xporthdr
);
591 src_writeq(dev
, MUnit
.IQ_L
, (u64
)address
);
593 spin_lock_irqsave(&fib
->dev
->iq_lock
, flags
);
594 src_writel(dev
, MUnit
.IQ_H
,
595 upper_32_bits(address
) & 0xffffffff);
596 src_writel(dev
, MUnit
.IQ_L
, address
& 0xffffffff);
597 spin_unlock_irqrestore(&fib
->dev
->iq_lock
, flags
);
605 * @size: mapping resize request
608 static int aac_src_ioremap(struct aac_dev
*dev
, u32 size
)
611 iounmap(dev
->regs
.src
.bar1
);
612 dev
->regs
.src
.bar1
= NULL
;
613 iounmap(dev
->regs
.src
.bar0
);
614 dev
->base
= dev
->regs
.src
.bar0
= NULL
;
617 dev
->regs
.src
.bar1
= ioremap(pci_resource_start(dev
->pdev
, 2),
618 AAC_MIN_SRC_BAR1_SIZE
);
620 if (dev
->regs
.src
.bar1
== NULL
)
622 dev
->base
= dev
->regs
.src
.bar0
= ioremap(dev
->base_start
, size
);
623 if (dev
->base
== NULL
) {
624 iounmap(dev
->regs
.src
.bar1
);
625 dev
->regs
.src
.bar1
= NULL
;
628 dev
->IndexRegs
= &((struct src_registers __iomem
*)
629 dev
->base
)->u
.tupelo
.IndexRegs
;
635 * @size: mapping resize request
638 static int aac_srcv_ioremap(struct aac_dev
*dev
, u32 size
)
641 iounmap(dev
->regs
.src
.bar0
);
642 dev
->base
= dev
->regs
.src
.bar0
= NULL
;
647 ioremap(pci_resource_start(dev
->pdev
, 2), AAC_MIN_SRCV_BAR1_SIZE
);
649 if (dev
->regs
.src
.bar1
== NULL
)
651 dev
->base
= dev
->regs
.src
.bar0
= ioremap(dev
->base_start
, size
);
652 if (dev
->base
== NULL
) {
653 iounmap(dev
->regs
.src
.bar1
);
654 dev
->regs
.src
.bar1
= NULL
;
657 dev
->IndexRegs
= &((struct src_registers __iomem
*)
658 dev
->base
)->u
.denali
.IndexRegs
;
662 void aac_set_intx_mode(struct aac_dev
*dev
)
664 if (dev
->msi_enabled
) {
665 aac_src_access_devreg(dev
, AAC_ENABLE_INTX
);
666 dev
->msi_enabled
= 0;
667 msleep(5000); /* Delay 5 seconds */
671 static void aac_clear_omr(struct aac_dev
*dev
)
675 omr_value
= src_readl(dev
, MUnit
.OMR
);
678 * Check for PCI Errors or Kernel Panic
680 if ((omr_value
== INVALID_OMR
) || (omr_value
& KERNEL_PANIC
))
684 * Preserve MSIX Value if any
686 src_writel(dev
, MUnit
.OMR
, omr_value
& AAC_INT_MODE_MSIX
);
687 src_readl(dev
, MUnit
.OMR
);
690 static void aac_dump_fw_fib_iop_reset(struct aac_dev
*dev
)
692 __le32 supported_options3
;
697 supported_options3
= dev
->supplement_adapter_info
.supported_options3
;
698 if (!(supported_options3
& AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP
))
701 aac_adapter_sync_cmd(dev
, IOP_RESET_FW_FIB_DUMP
,
702 0, 0, 0, 0, 0, 0, NULL
, NULL
, NULL
, NULL
, NULL
);
705 static bool aac_is_ctrl_up_and_running(struct aac_dev
*dev
)
708 unsigned long status
, start
;
714 status
= src_readl(dev
, MUnit
.OMR
);
716 if (status
== 0xffffffff)
719 if (status
& KERNEL_BOOTING
) {
724 if (time_after(jiffies
, start
+HZ
*SOFT_RESET_TIME
)) {
729 is_up
= status
& KERNEL_UP_AND_RUNNING
;
736 static void aac_notify_fw_of_iop_reset(struct aac_dev
*dev
)
738 aac_adapter_sync_cmd(dev
, IOP_RESET_ALWAYS
, 0, 0, 0, 0, 0, 0, NULL
,
739 NULL
, NULL
, NULL
, NULL
);
742 static void aac_send_iop_reset(struct aac_dev
*dev
)
744 aac_dump_fw_fib_iop_reset(dev
);
746 aac_notify_fw_of_iop_reset(dev
);
748 aac_set_intx_mode(dev
);
752 src_writel(dev
, MUnit
.IDR
, IOP_SRC_RESET_MASK
);
757 static void aac_send_hardware_soft_reset(struct aac_dev
*dev
)
762 val
= readl(((char *)(dev
->base
) + IBW_SWR_OFFSET
));
764 writel(val
, ((char *)(dev
->base
) + IBW_SWR_OFFSET
));
765 msleep_interruptible(20000);
768 static int aac_src_restart_adapter(struct aac_dev
*dev
, int bled
, u8 reset_type
)
777 dev_err(&dev
->pdev
->dev
, "adapter kernel panic'd %x.\n", bled
);
780 * When there is a BlinkLED, IOP_RESET has not effect
782 if (bled
>= 2 && dev
->sa_firmware
&& reset_type
& HW_IOP_RESET
)
783 reset_type
&= ~HW_IOP_RESET
;
785 dev
->a_ops
.adapter_enable_int
= aac_src_disable_interrupt
;
787 dev_err(&dev
->pdev
->dev
, "Controller reset type is %d\n", reset_type
);
789 if (reset_type
& HW_IOP_RESET
) {
790 dev_info(&dev
->pdev
->dev
, "Issuing IOP reset\n");
791 aac_send_iop_reset(dev
);
794 * Creates a delay or wait till up and running comes thru
796 is_ctrl_up
= aac_is_ctrl_up_and_running(dev
);
798 dev_err(&dev
->pdev
->dev
, "IOP reset failed\n");
800 dev_info(&dev
->pdev
->dev
, "IOP reset succeeded\n");
805 if (!dev
->sa_firmware
) {
806 dev_err(&dev
->pdev
->dev
, "ARC Reset attempt failed\n");
811 if (reset_type
& HW_SOFT_RESET
) {
812 dev_info(&dev
->pdev
->dev
, "Issuing SOFT reset\n");
813 aac_send_hardware_soft_reset(dev
);
814 dev
->msi_enabled
= 0;
816 is_ctrl_up
= aac_is_ctrl_up_and_running(dev
);
818 dev_err(&dev
->pdev
->dev
, "SOFT reset failed\n");
822 dev_info(&dev
->pdev
->dev
, "SOFT reset succeeded\n");
826 if (startup_timeout
< 300)
827 startup_timeout
= 300;
833 if (src_readl(dev
, MUnit
.OMR
) & KERNEL_PANIC
)
839 * aac_src_select_comm - Select communications method
841 * @comm: communications method
843 static int aac_src_select_comm(struct aac_dev
*dev
, int comm
)
846 case AAC_COMM_MESSAGE
:
847 dev
->a_ops
.adapter_intr
= aac_src_intr_message
;
848 dev
->a_ops
.adapter_deliver
= aac_src_deliver_message
;
857 * aac_src_init - initialize an Cardinal Frey Bar card
858 * @dev: device to configure
862 int aac_src_init(struct aac_dev
*dev
)
865 unsigned long status
;
867 int instance
= dev
->id
;
868 const char *name
= dev
->name
;
870 dev
->a_ops
.adapter_ioremap
= aac_src_ioremap
;
871 dev
->a_ops
.adapter_comm
= aac_src_select_comm
;
873 dev
->base_size
= AAC_MIN_SRC_BAR0_SIZE
;
874 if (aac_adapter_ioremap(dev
, dev
->base_size
)) {
875 printk(KERN_WARNING
"%s: unable to map adapter.\n", name
);
879 /* Failure to reset here is an option ... */
880 dev
->a_ops
.adapter_sync_cmd
= src_sync_cmd
;
881 dev
->a_ops
.adapter_enable_int
= aac_src_disable_interrupt
;
883 if (dev
->init_reset
) {
884 dev
->init_reset
= false;
885 if (!aac_src_restart_adapter(dev
, 0, IOP_HWSOFT_RESET
))
890 * Check to see if the board panic'd while booting.
892 status
= src_readl(dev
, MUnit
.OMR
);
893 if (status
& KERNEL_PANIC
) {
894 if (aac_src_restart_adapter(dev
,
895 aac_src_check_health(dev
), IOP_HWSOFT_RESET
))
900 * Check to see if the board failed any self tests.
902 status
= src_readl(dev
, MUnit
.OMR
);
903 if (status
& SELF_TEST_FAILED
) {
904 printk(KERN_ERR
"%s%d: adapter self-test failed.\n",
905 dev
->name
, instance
);
909 * Check to see if the monitor panic'd while booting.
911 if (status
& MONITOR_PANIC
) {
912 printk(KERN_ERR
"%s%d: adapter monitor panic.\n",
913 dev
->name
, instance
);
918 * Wait for the adapter to be up and running. Wait up to 3 minutes
920 while (!((status
= src_readl(dev
, MUnit
.OMR
)) &
921 KERNEL_UP_AND_RUNNING
)) {
923 (status
& (KERNEL_PANIC
|SELF_TEST_FAILED
|MONITOR_PANIC
))) ||
924 time_after(jiffies
, start
+HZ
*startup_timeout
)) {
925 printk(KERN_ERR
"%s%d: adapter kernel failed to start, init status = %lx.\n",
926 dev
->name
, instance
, status
);
930 ((status
& (KERNEL_PANIC
|SELF_TEST_FAILED
|MONITOR_PANIC
)) ||
931 time_after(jiffies
, start
+ HZ
*
932 ((startup_timeout
> 60)
933 ? (startup_timeout
- 60)
934 : (startup_timeout
/ 2))))) {
935 if (likely(!aac_src_restart_adapter(dev
,
936 aac_src_check_health(dev
), IOP_HWSOFT_RESET
)))
942 if (restart
&& aac_commit
)
945 * Fill in the common function dispatch table.
947 dev
->a_ops
.adapter_interrupt
= aac_src_interrupt_adapter
;
948 dev
->a_ops
.adapter_disable_int
= aac_src_disable_interrupt
;
949 dev
->a_ops
.adapter_enable_int
= aac_src_disable_interrupt
;
950 dev
->a_ops
.adapter_notify
= aac_src_notify_adapter
;
951 dev
->a_ops
.adapter_sync_cmd
= src_sync_cmd
;
952 dev
->a_ops
.adapter_check_health
= aac_src_check_health
;
953 dev
->a_ops
.adapter_restart
= aac_src_restart_adapter
;
954 dev
->a_ops
.adapter_start
= aac_src_start_adapter
;
957 * First clear out all interrupts. Then enable the one's that we
960 aac_adapter_comm(dev
, AAC_COMM_MESSAGE
);
961 aac_adapter_disable_int(dev
);
962 src_writel(dev
, MUnit
.ODR_C
, 0xffffffff);
963 aac_adapter_enable_int(dev
);
965 if (aac_init_adapter(dev
) == NULL
)
967 if (dev
->comm_interface
!= AAC_COMM_MESSAGE_TYPE1
)
970 dev
->msi
= !pci_enable_msi(dev
->pdev
);
972 dev
->aac_msix
[0].vector_no
= 0;
973 dev
->aac_msix
[0].dev
= dev
;
975 if (request_irq(dev
->pdev
->irq
, dev
->a_ops
.adapter_intr
,
976 IRQF_SHARED
, "aacraid", &(dev
->aac_msix
[0])) < 0) {
979 pci_disable_msi(dev
->pdev
);
981 printk(KERN_ERR
"%s%d: Interrupt unavailable.\n",
985 dev
->dbg_base
= pci_resource_start(dev
->pdev
, 2);
986 dev
->dbg_base_mapped
= dev
->regs
.src
.bar1
;
987 dev
->dbg_size
= AAC_MIN_SRC_BAR1_SIZE
;
988 dev
->a_ops
.adapter_enable_int
= aac_src_enable_interrupt_message
;
990 aac_adapter_enable_int(dev
);
992 if (!dev
->sync_mode
) {
994 * Tell the adapter that all is configured, and it can
995 * start accepting requests
997 aac_src_start_adapter(dev
);
1006 static int aac_src_wait_sync(struct aac_dev
*dev
, int *status
)
1008 unsigned long start
= jiffies
;
1009 unsigned long usecs
= 0;
1013 while (time_before(jiffies
, start
+delay
)) {
1015 * Delay 5 microseconds to let Mon960 get info.
1020 * Mon960 will set doorbell0 bit when it has completed the
1023 if (aac_src_get_sync_status(dev
) & OUTBOUNDDOORBELL_0
) {
1025 * Clear: the doorbell.
1027 if (dev
->msi_enabled
)
1028 aac_src_access_devreg(dev
, AAC_CLEAR_SYNC_BIT
);
1030 src_writel(dev
, MUnit
.ODR_C
,
1031 OUTBOUNDDOORBELL_0
<< SRC_ODR_SHIFT
);
1038 * Yield the processor in case we are slow
1040 usecs
= 1 * USEC_PER_MSEC
;
1041 usleep_range(usecs
, usecs
+ 50);
1044 * Pull the synch status from Mailbox 0.
1046 if (status
&& !rc
) {
1047 status
[0] = readl(&dev
->IndexRegs
->Mailbox
[0]);
1048 status
[1] = readl(&dev
->IndexRegs
->Mailbox
[1]);
1049 status
[2] = readl(&dev
->IndexRegs
->Mailbox
[2]);
1050 status
[3] = readl(&dev
->IndexRegs
->Mailbox
[3]);
1051 status
[4] = readl(&dev
->IndexRegs
->Mailbox
[4]);
1058 * aac_src_soft_reset - perform soft reset to speed up
1061 * Assumptions: That the controller is in a state where we can
1062 * bring it back to life with an init struct. We can only use
1063 * fast sync commands, as the timeout is 5 seconds.
1065 * @dev: device to configure
1069 static int aac_src_soft_reset(struct aac_dev
*dev
)
1071 u32 status_omr
= src_readl(dev
, MUnit
.OMR
);
1075 char *state_str
[7] = {
1076 "GET_ADAPTER_PROPERTIES Failed",
1077 "GET_ADAPTER_PROPERTIES timeout",
1078 "SOFT_RESET not supported",
1081 "Check Health failed"
1084 if (status_omr
== INVALID_OMR
)
1085 return 1; // pcie hosed
1087 if (!(status_omr
& KERNEL_UP_AND_RUNNING
))
1088 return 1; // not up and running
1091 * We go into soft reset mode to allow us to handle response
1093 dev
->in_soft_reset
= 1;
1094 dev
->msi_enabled
= status_omr
& AAC_INT_MODE_MSIX
;
1096 /* Get adapter properties */
1097 rc
= aac_adapter_sync_cmd(dev
, GET_ADAPTER_PROPERTIES
, 0, 0, 0,
1098 0, 0, 0, status
+0, status
+1, status
+2, status
+3, status
+4);
1103 if (aac_src_wait_sync(dev
, status
)) {
1109 if (!(status
[1] & le32_to_cpu(AAC_OPT_EXTENDED
) &&
1110 (status
[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET
)))) {
1115 if ((status
[1] & le32_to_cpu(AAC_OPT_EXTENDED
)) &&
1116 (status
[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE
)))
1117 dev
->sa_firmware
= 1;
1120 rc
= aac_adapter_sync_cmd(dev
, DROP_IO
, 0, 0, 0, 0, 0, 0,
1121 status
+0, status
+1, status
+2, status
+3, status
+4);
1127 if (aac_src_wait_sync(dev
, status
)) {
1133 dev_err(&dev
->pdev
->dev
, "%s: %d outstanding I/O pending\n",
1134 __func__
, status
[1]);
1137 rc
= aac_src_check_health(dev
);
1140 dev
->in_soft_reset
= 0;
1141 dev
->msi_enabled
= 0;
1143 dev_err(&dev
->pdev
->dev
, "%s: %s status = %d", __func__
,
1144 state_str
[state
], rc
);
1149 * aac_srcv_init - initialize an SRCv card
1150 * @dev: device to configure
1154 int aac_srcv_init(struct aac_dev
*dev
)
1156 unsigned long start
;
1157 unsigned long status
;
1159 int instance
= dev
->id
;
1160 const char *name
= dev
->name
;
1162 dev
->a_ops
.adapter_ioremap
= aac_srcv_ioremap
;
1163 dev
->a_ops
.adapter_comm
= aac_src_select_comm
;
1165 dev
->base_size
= AAC_MIN_SRCV_BAR0_SIZE
;
1166 if (aac_adapter_ioremap(dev
, dev
->base_size
)) {
1167 printk(KERN_WARNING
"%s: unable to map adapter.\n", name
);
1171 /* Failure to reset here is an option ... */
1172 dev
->a_ops
.adapter_sync_cmd
= src_sync_cmd
;
1173 dev
->a_ops
.adapter_enable_int
= aac_src_disable_interrupt
;
1175 if (dev
->init_reset
) {
1176 dev
->init_reset
= false;
1177 if (aac_src_soft_reset(dev
)) {
1178 aac_src_restart_adapter(dev
, 0, IOP_HWSOFT_RESET
);
1184 * Check to see if flash update is running.
1185 * Wait for the adapter to be up and running. Wait up to 5 minutes
1187 status
= src_readl(dev
, MUnit
.OMR
);
1188 if (status
& FLASH_UPD_PENDING
) {
1191 status
= src_readl(dev
, MUnit
.OMR
);
1192 if (time_after(jiffies
, start
+HZ
*FWUPD_TIMEOUT
)) {
1193 printk(KERN_ERR
"%s%d: adapter flash update failed.\n",
1194 dev
->name
, instance
);
1197 } while (!(status
& FLASH_UPD_SUCCESS
) &&
1198 !(status
& FLASH_UPD_FAILED
));
1199 /* Delay 10 seconds.
1200 * Because right now FW is doing a soft reset,
1201 * do not read scratch pad register at this time
1206 * Check to see if the board panic'd while booting.
1208 status
= src_readl(dev
, MUnit
.OMR
);
1209 if (status
& KERNEL_PANIC
) {
1210 if (aac_src_restart_adapter(dev
,
1211 aac_src_check_health(dev
), IOP_HWSOFT_RESET
))
1216 * Check to see if the board failed any self tests.
1218 status
= src_readl(dev
, MUnit
.OMR
);
1219 if (status
& SELF_TEST_FAILED
) {
1220 printk(KERN_ERR
"%s%d: adapter self-test failed.\n", dev
->name
, instance
);
1224 * Check to see if the monitor panic'd while booting.
1226 if (status
& MONITOR_PANIC
) {
1227 printk(KERN_ERR
"%s%d: adapter monitor panic.\n", dev
->name
, instance
);
1233 * Wait for the adapter to be up and running. Wait up to 3 minutes
1236 status
= src_readl(dev
, MUnit
.OMR
);
1237 if (status
== INVALID_OMR
)
1241 (status
& (KERNEL_PANIC
|SELF_TEST_FAILED
|MONITOR_PANIC
))) ||
1242 time_after(jiffies
, start
+HZ
*startup_timeout
)) {
1243 printk(KERN_ERR
"%s%d: adapter kernel failed to start, init status = %lx.\n",
1244 dev
->name
, instance
, status
);
1248 ((status
& (KERNEL_PANIC
|SELF_TEST_FAILED
|MONITOR_PANIC
)) ||
1249 time_after(jiffies
, start
+ HZ
*
1250 ((startup_timeout
> 60)
1251 ? (startup_timeout
- 60)
1252 : (startup_timeout
/ 2))))) {
1253 if (likely(!aac_src_restart_adapter(dev
,
1254 aac_src_check_health(dev
), IOP_HWSOFT_RESET
)))
1259 } while (!(status
& KERNEL_UP_AND_RUNNING
));
1261 if (restart
&& aac_commit
)
1264 * Fill in the common function dispatch table.
1266 dev
->a_ops
.adapter_interrupt
= aac_src_interrupt_adapter
;
1267 dev
->a_ops
.adapter_disable_int
= aac_src_disable_interrupt
;
1268 dev
->a_ops
.adapter_enable_int
= aac_src_disable_interrupt
;
1269 dev
->a_ops
.adapter_notify
= aac_src_notify_adapter
;
1270 dev
->a_ops
.adapter_sync_cmd
= src_sync_cmd
;
1271 dev
->a_ops
.adapter_check_health
= aac_src_check_health
;
1272 dev
->a_ops
.adapter_restart
= aac_src_restart_adapter
;
1273 dev
->a_ops
.adapter_start
= aac_src_start_adapter
;
1276 * First clear out all interrupts. Then enable the one's that we
1279 aac_adapter_comm(dev
, AAC_COMM_MESSAGE
);
1280 aac_adapter_disable_int(dev
);
1281 src_writel(dev
, MUnit
.ODR_C
, 0xffffffff);
1282 aac_adapter_enable_int(dev
);
1284 if (aac_init_adapter(dev
) == NULL
)
1286 if ((dev
->comm_interface
!= AAC_COMM_MESSAGE_TYPE2
) &&
1287 (dev
->comm_interface
!= AAC_COMM_MESSAGE_TYPE3
))
1289 if (dev
->msi_enabled
)
1290 aac_src_access_devreg(dev
, AAC_ENABLE_MSIX
);
1292 if (aac_acquire_irq(dev
))
1295 dev
->dbg_base
= pci_resource_start(dev
->pdev
, 2);
1296 dev
->dbg_base_mapped
= dev
->regs
.src
.bar1
;
1297 dev
->dbg_size
= AAC_MIN_SRCV_BAR1_SIZE
;
1298 dev
->a_ops
.adapter_enable_int
= aac_src_enable_interrupt_message
;
1300 aac_adapter_enable_int(dev
);
1302 if (!dev
->sync_mode
) {
1304 * Tell the adapter that all is configured, and it can
1305 * start accepting requests
1307 aac_src_start_adapter(dev
);
1316 void aac_src_access_devreg(struct aac_dev
*dev
, int mode
)
1321 case AAC_ENABLE_INTERRUPT
:
1324 dev
->OIMR
= (dev
->msi_enabled
?
1325 AAC_INT_ENABLE_TYPE1_MSIX
:
1326 AAC_INT_ENABLE_TYPE1_INTX
));
1329 case AAC_DISABLE_INTERRUPT
:
1332 dev
->OIMR
= AAC_INT_DISABLE_ALL
);
1335 case AAC_ENABLE_MSIX
:
1337 val
= src_readl(dev
, MUnit
.IDR
);
1339 src_writel(dev
, MUnit
.IDR
, val
);
1340 src_readl(dev
, MUnit
.IDR
);
1342 val
= PMC_ALL_INTERRUPT_BITS
;
1343 src_writel(dev
, MUnit
.IOAR
, val
);
1344 val
= src_readl(dev
, MUnit
.OIMR
);
1347 val
& (~(PMC_GLOBAL_INT_BIT2
| PMC_GLOBAL_INT_BIT0
)));
1350 case AAC_DISABLE_MSIX
:
1352 val
= src_readl(dev
, MUnit
.IDR
);
1354 src_writel(dev
, MUnit
.IDR
, val
);
1355 src_readl(dev
, MUnit
.IDR
);
1358 case AAC_CLEAR_AIF_BIT
:
1360 val
= src_readl(dev
, MUnit
.IDR
);
1362 src_writel(dev
, MUnit
.IDR
, val
);
1363 src_readl(dev
, MUnit
.IDR
);
1366 case AAC_CLEAR_SYNC_BIT
:
1368 val
= src_readl(dev
, MUnit
.IDR
);
1370 src_writel(dev
, MUnit
.IDR
, val
);
1371 src_readl(dev
, MUnit
.IDR
);
1374 case AAC_ENABLE_INTX
:
1376 val
= src_readl(dev
, MUnit
.IDR
);
1378 src_writel(dev
, MUnit
.IDR
, val
);
1379 src_readl(dev
, MUnit
.IDR
);
1381 val
= PMC_ALL_INTERRUPT_BITS
;
1382 src_writel(dev
, MUnit
.IOAR
, val
);
1383 src_readl(dev
, MUnit
.IOAR
);
1384 val
= src_readl(dev
, MUnit
.OIMR
);
1385 src_writel(dev
, MUnit
.OIMR
,
1386 val
& (~(PMC_GLOBAL_INT_BIT2
)));
1394 static int aac_src_get_sync_status(struct aac_dev
*dev
)
1399 msix_val
= src_readl(dev
, MUnit
.ODR_MSI
) & SRC_MSI_READ_MASK
? 1 : 0;
1401 if (!dev
->msi_enabled
) {
1403 * if Legacy int status indicates cmd is not complete
1404 * sample MSIx register to see if it indiactes cmd complete,
1405 * if yes set the controller in MSIx mode and consider cmd
1408 legacy_val
= src_readl(dev
, MUnit
.ODR_R
) >> SRC_ODR_SHIFT
;
1409 if (!(legacy_val
& 1) && msix_val
)
1410 dev
->msi_enabled
= 1;