/*
 *  Adaptec AAC series RAID controller driver
 *  (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform dependent module for the pci cyclone board.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"
struct aac_common aac_config = {
    .irq_mod = 1
};
static inline int aac_is_msix_mode(struct aac_dev *dev)
{
    u32 status;

    status = src_readl(dev, MUnit.OMR);
    return (status & AAC_INT_MODE_MSIX);
}

static inline void aac_change_to_intx(struct aac_dev *dev)
{
    aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
    aac_src_access_devreg(dev, AAC_ENABLE_INTX);
}
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr,
                          unsigned long commsize, unsigned long commalign)
{
    unsigned char *base;
    unsigned long size, align;
    const unsigned long fibsize = dev->max_fib_size;
    const unsigned long printfbufsiz = 256;
    unsigned long host_rrq_size = 0;
    struct aac_init *init;
    dma_addr_t phys;
    unsigned long aac_max_hostphysmempages;

    if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
        dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
        host_rrq_size = (dev->scsi_host_ptr->can_queue
            + AAC_NUM_MGT_FIB) * sizeof(u32);
    size = fibsize + sizeof(struct aac_init) + commsize +
        commalign + printfbufsiz + host_rrq_size;

    base = pci_alloc_consistent(dev->pdev, size, &phys);
    if (base == NULL) {
        printk(KERN_ERR "aacraid: unable to create mapping.\n");
        return 0;
    }

    dev->comm_addr = (void *)base;
    dev->comm_phys = phys;
    dev->comm_size = size;

    if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
        dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
        dev->host_rrq = (u32 *)(base + fibsize);
        dev->host_rrq_pa = phys + fibsize;
        memset(dev->host_rrq, 0, host_rrq_size);
    }

    dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
    dev->init_pa = phys + fibsize + host_rrq_size;

    init = dev->init;

    init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
    if (dev->max_fib_size != sizeof(struct hw_fib))
        init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
    init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS);
    init->fsrev = cpu_to_le32(dev->fsrev);

    /*
     * Adapter Fibs are the first thing allocated so that they
     * start page aligned.
     */
    dev->aif_base_va = (struct hw_fib *)base;

    init->AdapterFibsVirtualAddress = 0;
    init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
    init->AdapterFibsSize = cpu_to_le32(fibsize);
    init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));

    /*
     * Number of 4k pages of host physical memory. The aacraid fw needs
     * this number to be less than 4gb worth of pages. New firmware doesn't
     * have any issues with the mapping system, but older Firmware did, and
     * had *troubles* dealing with the math overloading past 32 bits, thus
     * we must limit this field.
     */
    aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
    if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
        init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
    else
        init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);

    init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
        INITFLAGS_DRIVER_SUPPORTS_PM);
    init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
    init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
    init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
    init->MaxNumAif = cpu_to_le32(dev->max_num_aif);

    if (dev->comm_interface == AAC_COMM_MESSAGE) {
        init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
        dprintk((KERN_WARNING "aacraid: New Comm Interface enabled\n"));
    } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
        init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
        init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
            INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
        init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
        init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
        dprintk((KERN_WARNING "aacraid: New Comm Interface type1 enabled\n"));
    } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
        init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
        init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
            INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
        init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
        init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
        /* number of MSI-X vectors */
        init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
        dprintk((KERN_WARNING "aacraid: New Comm Interface type2 enabled\n"));
    }

    /*
     * Increment the base address by the amount already used.
     */
    base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
    phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
        sizeof(struct aac_init));

    /*
     * Align the beginning of Headers to commalign.
     */
    align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
    base = base + align;
    phys = phys + align;

    /*
     * Fill in addresses of the Comm Area Headers and Queues.
     */
    *commaddr = base;
    init->CommHeaderAddress = cpu_to_le32((u32)phys);

    /*
     * Increment the base address by the size of the CommArea.
     */
    base = base + commsize;
    phys = phys + commsize;

    /*
     * Place the Printf buffer area after the Fast I/O comm area.
     */
    dev->printfbuf = (void *)base;
    init->printfbuf = cpu_to_le32(phys);
    init->printfbufsiz = cpu_to_le32(printfbufsiz);
    memset(base, 0, printfbufsiz);

    return 1;
}
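/*
 * For reference, the layout of the single pci_alloc_consistent() region
 * built by aac_alloc_comm() above, offsets taken from 'base'/'phys'
 * (this summary is derived from the code above, not from separate
 * firmware documentation):
 *
 *   [0 .. fibsize)                 adapter FIB area (page aligned)
 *   [.. + host_rrq_size)           host RRQ (TYPE1/TYPE2 interfaces only)
 *   [.. + sizeof(struct aac_init)) init structure handed to the firmware
 *   [.. + align)                   padding up to the 'commalign' boundary
 *   [.. + commsize)                comm area headers and queue entries
 *   [.. + printfbufsiz)            adapter printf buffer
 */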
static void aac_queue_init(struct aac_dev *dev, struct aac_queue *q, u32 *mem, int qsize)
{
    atomic_set(&q->numpending, 0);
    q->dev = dev;
    init_waitqueue_head(&q->cmdready);
    INIT_LIST_HEAD(&q->cmdq);
    init_waitqueue_head(&q->qfull);
    spin_lock_init(&q->lockdata);
    q->lock = &q->lockdata;
    q->headers.producer = (__le32 *)mem;
    q->headers.consumer = (__le32 *)(mem + 1);
    *(q->headers.producer) = cpu_to_le32(qsize);
    *(q->headers.consumer) = cpu_to_le32(qsize);
    q->entries = qsize;
}
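/*
 * Note: 'mem' points at a producer/consumer index pair inside the shared
 * comm header area, so each queue consumes one such pair (hence the
 * "headers += 2" steps in aac_comm_init() below). Both indices start out
 * equal (at qsize), which appears to be this driver's convention for an
 * empty queue.
 */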
/**
 *  aac_send_shutdown   -   shutdown an adapter
 *  @dev: Adapter to shutdown
 *
 *  This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */
int aac_send_shutdown(struct aac_dev *dev)
{
    struct fib *fibctx;
    struct aac_close *cmd;
    int status;

    fibctx = aac_fib_alloc(dev);
    if (!fibctx)
        return -ENOMEM;
    aac_fib_init(fibctx);

    mutex_lock(&dev->ioctl_mutex);
    dev->adapter_shutdown = 1;
    mutex_unlock(&dev->ioctl_mutex);

    cmd = (struct aac_close *) fib_data(fibctx);
    cmd->command = cpu_to_le32(VM_CloseAll);
    cmd->cid = cpu_to_le32(0xfffffffe);

    status = aac_fib_send(ContainerCommand,
            fibctx,
            sizeof(struct aac_close),
            FsaNormal,
            -2 /* Timeout silently */, 1,
            NULL, NULL);

    if (status >= 0)
        aac_fib_complete(fibctx);
    /* FIB should be freed only after getting the response from the F/W */
    if (status != -ERESTARTSYS)
        aac_fib_free(fibctx);
    if ((dev->pdev->device == PMC_DEVICE_S7 ||
         dev->pdev->device == PMC_DEVICE_S8 ||
         dev->pdev->device == PMC_DEVICE_S9) &&
         dev->msi_enabled)
        aac_src_access_devreg(dev, AAC_ENABLE_INTX);

    return status;
}
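/*
 * The AAC_ENABLE_INTX step above is only taken on PMC S7/S8/S9 parts with
 * MSI-X enabled; the likely intent is to leave the controller in legacy
 * INTX mode so a subsequently booted kernel (e.g. a kdump kernel) finds it
 * in a known interrupt state. This rationale is inferred, not documented
 * in this file.
 */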
/**
 *  aac_comm_init   -   Initialise FSA data structures
 *  @dev: Adapter to initialise
 *
 *  Initializes the data structures that are required for the FSA
 *  communication interface to operate.
 *  Returns
 *      0 - on success.
 *      -ENOMEM - if the communication area could not be allocated;
 *      this is a fatal error.
 */
static int aac_comm_init(struct aac_dev *dev)
{
    unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
    unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
    u32 *headers;
    struct aac_entry *queues;
    unsigned long size;
    struct aac_queue_block *comm = dev->queues;

    /*
     * Now allocate and initialize the zone structures used as our
     * pool of FIB context records. The size of the zone is based
     * on the system memory size. We also initialize the mutex used
     * to protect the zone.
     */
    spin_lock_init(&dev->fib_lock);

    /*
     * Allocate the physically contiguous space for the communication
     * queue headers.
     */
    size = hdrsize + queuesize;

    if (!aac_alloc_comm(dev, (void **)&headers, size, QUEUE_ALIGNMENT))
        return -ENOMEM;

    queues = (struct aac_entry *)(((ulong)headers) + hdrsize);

    /* Adapter to host normal priority command queue */
    comm->queue[HostNormCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
    queues += HOST_NORM_CMD_ENTRIES;
    headers += 2;

    /* Adapter to host high priority command queue */
    comm->queue[HostHighCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
    queues += HOST_HIGH_CMD_ENTRIES;
    headers += 2;

    /* Host to adapter normal priority command queue */
    comm->queue[AdapNormCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
    queues += ADAP_NORM_CMD_ENTRIES;
    headers += 2;

    /* Host to adapter high priority command queue */
    comm->queue[AdapHighCmdQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
    queues += ADAP_HIGH_CMD_ENTRIES;
    headers += 2;

    /* Adapter to host normal priority response queue */
    comm->queue[HostNormRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
    queues += HOST_NORM_RESP_ENTRIES;
    headers += 2;

    /* Adapter to host high priority response queue */
    comm->queue[HostHighRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
    queues += HOST_HIGH_RESP_ENTRIES;
    headers += 2;

    /* Host to adapter normal priority response queue */
    comm->queue[AdapNormRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
    queues += ADAP_NORM_RESP_ENTRIES;
    headers += 2;

    /* Host to adapter high priority response queue */
    comm->queue[AdapHighRespQueue].base = queues;
    aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);

    comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
    comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
    comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
    comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

    return 0;
}
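/*
 * Note on the lock sharing above: each adapter-bound queue shares its lock
 * with the host-bound queue of the same priority in the opposite direction
 * (e.g. AdapNormCmdQueue with HostNormRespQueue), presumably so that
 * posting a command and consuming its completion are serialized under a
 * single lock. The pairing is taken from the assignments above; the
 * rationale is inferred.
 */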
void aac_define_int_mode(struct aac_dev *dev)
{
    int i, msi_count, min_msix;

    msi_count = i = 0;
    /* max. vectors from GET_COMM_PREFERRED_SETTINGS */
    if (dev->max_msix == 0 ||
        dev->pdev->device == PMC_DEVICE_S6 ||
        dev->sync_mode) {
        dev->max_msix = 1;
        dev->vector_cap = dev->scsi_host_ptr->can_queue +
                  AAC_NUM_MGT_FIB;
        return;
    }

    /* Don't bother allocating more MSI-X vectors than cpus */
    msi_count = min(dev->max_msix,
        (unsigned int)num_online_cpus());

    dev->max_msix = msi_count;

    if (msi_count > AAC_MAX_MSIX)
        msi_count = AAC_MAX_MSIX;

    for (i = 0; i < msi_count; i++)
        dev->msixentry[i].entry = i;

    if (msi_count > 1 &&
        pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
        min_msix = 2;
        i = pci_enable_msix_range(dev->pdev,
                      dev->msixentry,
                      min_msix,
                      msi_count);
        if (i > 0) {
            dev->msi_enabled = 1;
            msi_count = i;
        } else {
            dev->msi_enabled = 0;
            dev_err(&dev->pdev->dev,
                "MSIX not supported!! Will try INTX 0x%x.\n", i);
        }
    }

    if (!dev->msi_enabled)
        dev->max_msix = msi_count = 1;
    else {
        if (dev->max_msix > msi_count)
            dev->max_msix = msi_count;
    }
    dev->vector_cap = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
              msi_count;
}
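/*
 * After aac_define_int_mode() runs, dev->vector_cap is the per-vector share
 * of outstanding FIBs: (can_queue + AAC_NUM_MGT_FIB) / msi_count when MSI-X
 * is enabled, or the whole budget when only a single INTX/MSI vector is
 * used. This simply restates the assignments above.
 */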
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
    u32 status[5];
    struct Scsi_Host *host = dev->scsi_host_ptr;
    extern int aac_sync_mode;

    /*
     * Check the preferred comm settings, defaults from template.
     */
    dev->management_fib_count = 0;
    spin_lock_init(&dev->manage_lock);
    spin_lock_init(&dev->sync_lock);
    spin_lock_init(&dev->iq_lock);
    dev->max_fib_size = sizeof(struct hw_fib);
    dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
        - sizeof(struct aac_fibhdr)
        - sizeof(struct aac_write) + sizeof(struct sgentry))
            / sizeof(struct sgentry);
    dev->comm_interface = AAC_COMM_PRODUCER;
    dev->raw_io_interface = dev->raw_io_64 = 0;

    /*
     * Switch the adapter back to INTX mode if it is still in MSI-X mode.
     */
    if (aac_is_msix_mode(dev)) {
        aac_change_to_intx(dev);
        dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
    }
    if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
        0, 0, 0, 0, 0, 0,
        status+0, status+1, status+2, status+3, NULL)) &&
        (status[0] == 0x00000001)) {
        dev->doorbell_mask = status[3];
        if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
            dev->raw_io_64 = 1;
        dev->sync_mode = aac_sync_mode;
        if (dev->a_ops.adapter_comm &&
            (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
            dev->comm_interface = AAC_COMM_MESSAGE;
            dev->raw_io_interface = 1;
            if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
                /* driver supports TYPE1 (Tupelo) */
                dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
            } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
                /* driver supports TYPE2 (Denali) */
                dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
            } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
                   (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
                /* driver doesn't support TYPE3 and TYPE4 */
                /* switch to sync. mode */
                dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
                dev->sync_mode = 1;
            }
        }
        if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
            (status[2] > dev->base_size)) {
            aac_adapter_ioremap(dev, 0);
            dev->base_size = status[2];
            if (aac_adapter_ioremap(dev, status[2])) {
                /* remap failed, go back ... */
                dev->comm_interface = AAC_COMM_PRODUCER;
                if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
                    printk(KERN_WARNING
                           "aacraid: unable to map adapter.\n");
                    return NULL;
                }
            }
        }
    }
    dev->msi_enabled = 0;
    dev->adapter_shutdown = 0;
    if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
        0, 0, 0, 0, 0, 0,
        status+0, status+1, status+2, status+3, status+4))
        && (status[0] == 0x00000001)) {
        /*
         *  status[1] >> 16     maximum command size in KB
         *  status[1] & 0xFFFF  maximum FIB size
         *  status[2] >> 16     maximum SG elements to driver
         *  status[2] & 0xFFFF  maximum SG elements from driver
         *  status[3] & 0xFFFF  maximum number FIBs outstanding
         */
        host->max_sectors = (status[1] >> 16) << 1;
        /* Multiple of 32 for PMC */
        dev->max_fib_size = status[1] & 0xFFE0;
        host->sg_tablesize = status[2] >> 16;
        dev->sg_tablesize = status[2] & 0xFFFF;
        if (dev->pdev->device == PMC_DEVICE_S7 ||
            dev->pdev->device == PMC_DEVICE_S8 ||
            dev->pdev->device == PMC_DEVICE_S9)
            host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
                (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
        else
            host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
        dev->max_num_aif = status[4] & 0xFFFF;
    }
    /*
     * All these overrides are based on fixed, internal knowledge of
     * existing adapters; acbsize should be set with caution.
     */
    if (acbsize == 512) {
        host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
        dev->max_fib_size = 512;
        dev->sg_tablesize = host->sg_tablesize
            = (512 - sizeof(struct aac_fibhdr)
            - sizeof(struct aac_write) + sizeof(struct sgentry))
                / sizeof(struct sgentry);
        host->can_queue = AAC_NUM_IO_FIB;
    } else if (acbsize == 2048) {
        host->max_sectors = 512;
        dev->max_fib_size = 2048;
        host->sg_tablesize = 65;
        dev->sg_tablesize = 81;
        host->can_queue = 512 - AAC_NUM_MGT_FIB;
    } else if (acbsize == 4096) {
        host->max_sectors = 1024;
        dev->max_fib_size = 4096;
        host->sg_tablesize = 129;
        dev->sg_tablesize = 166;
        host->can_queue = 256 - AAC_NUM_MGT_FIB;
    } else if (acbsize == 8192) {
        host->max_sectors = 2048;
        dev->max_fib_size = 8192;
        host->sg_tablesize = 257;
        dev->sg_tablesize = 337;
        host->can_queue = 128 - AAC_NUM_MGT_FIB;
    } else if (acbsize > 0) {
        printk("Illegal acbsize=%d ignored\n", acbsize);
    }

    if (numacb > 0) {
        if (numacb < host->can_queue)
            host->can_queue = numacb;
        else
            printk("numacb=%d ignored\n", numacb);
    }

    if (host->can_queue > AAC_NUM_IO_FIB)
        host->can_queue = AAC_NUM_IO_FIB;
    if (dev->pdev->device == PMC_DEVICE_S6 ||
        dev->pdev->device == PMC_DEVICE_S7 ||
        dev->pdev->device == PMC_DEVICE_S8 ||
        dev->pdev->device == PMC_DEVICE_S9)
        aac_define_int_mode(dev);

    /*
     * Ok now init the communication subsystem.
     */
    dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
    if (dev->queues == NULL) {
        printk(KERN_ERR "Error could not allocate comm region.\n");
        return NULL;
    }

    if (aac_comm_init(dev) < 0) {
        kfree(dev->queues);
        return NULL;
    }
    /*
     * Initialize the list of fibs.
     */
    if (aac_fib_setup(dev) < 0) {
        kfree(dev->queues);
        return NULL;
    }

    INIT_LIST_HEAD(&dev->fib_list);
    INIT_LIST_HEAD(&dev->sync_fib_list);