/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  comminit.c
 *
 * Abstract: This supports the initialization of the host adapter communication interface.
 *    This is a platform dependent module for the pci cyclone board.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"
struct aac_common aac_config = {
	.irq_mod = 1
};
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
	unsigned char *base;
	unsigned long size, align;
	const unsigned long fibsize = dev->max_fib_size;
	const unsigned long printfbufsiz = 256;
	unsigned long host_rrq_size = 0;
	struct aac_init *init;
	dma_addr_t phys;
	unsigned long aac_max_hostphysmempages;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
		host_rrq_size = (dev->scsi_host_ptr->can_queue
			+ AAC_NUM_MGT_FIB) * sizeof(u32);
	size = fibsize + sizeof(struct aac_init) + commsize +
		commalign + printfbufsiz + host_rrq_size;

	base = pci_alloc_consistent(dev->pdev, size, &phys);

	if (base == NULL) {
		printk(KERN_ERR "aacraid: unable to create mapping.\n");
		return 0;
	}
	dev->comm_addr = (void *)base;
	dev->comm_phys = phys;
	dev->comm_size = size;
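
	/*
	 * Layout note (derived from the offsets used in the rest of this
	 * function, descriptive only): the single coherent allocation above
	 * is carved up as
	 *   [ adapter fibs (fibsize) ][ host RRQ (TYPE1/TYPE2 only) ]
	 *   [ struct aac_init ][ padding up to commalign ]
	 *   [ comm area headers/queues (commsize) ][ printf buffer ]
	 */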
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		dev->host_rrq = (u32 *)(base + fibsize);
		dev->host_rrq_pa = phys + fibsize;
		memset(dev->host_rrq, 0, host_rrq_size);
	}

	dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
	dev->init_pa = phys + fibsize + host_rrq_size;

	init = dev->init;
	init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
	if (dev->max_fib_size != sizeof(struct hw_fib))
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
	init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
	init->fsrev = cpu_to_le32(dev->fsrev);
	/*
	 *	Adapter Fibs are the first thing allocated so that they
	 *	start page aligned
	 */
	dev->aif_base_va = (struct hw_fib *)base;

	init->AdapterFibsVirtualAddress = 0;
	init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
	init->AdapterFibsSize = cpu_to_le32(fibsize);
	init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
	/*
	 * number of 4k pages of host physical memory. The aacraid fw needs
	 * this number to be less than 4gb worth of pages. New firmware doesn't
	 * have any issues with the mapping system, but older Firmware did, and
	 * had *troubles* dealing with the math overloading past 32 bits, thus
	 * we must limit this field.
	 */
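	/*
	 * Illustrative example (figures not taken from the original source):
	 * on a host whose memory all sits below 4GB, dma_get_required_mask()
	 * reports 0xffffffff, so the shift below yields 0xfffff 4K pages;
	 * anything beyond AAC_MAX_HOSTPHYSMEMPAGES is clamped to that cap.
	 */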
	aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
	if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
		init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
	else
		init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
	init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
		INITFLAGS_DRIVER_SUPPORTS_PM);
	init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
	init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
	init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
	if (dev->comm_interface == AAC_COMM_MESSAGE) {
		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
		dprintk((KERN_WARNING "aacraid: New Comm Interface enabled\n"));
	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
			INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
		init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
		init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
		dprintk((KERN_WARNING "aacraid: New Comm Interface type1 enabled\n"));
	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
		init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
			INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
		init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
		init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
		/* number of MSI-X */
		init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
		dprintk((KERN_WARNING "aacraid: New Comm Interface type2 enabled\n"));
	}
	/*
	 *	Increment the base address by the amount already used
	 */
	base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
		sizeof(struct aac_init));

	/*
	 *	Align the beginning of Headers to commalign
	 */
	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
	base = base + align;
	phys = phys + align;
	/*
	 *	Fill in addresses of the Comm Area Headers and Queues
	 */
	*commaddr = base;
	init->CommHeaderAddress = cpu_to_le32((u32)phys);
	/*
	 *	Increment the base address by the size of the CommArea
	 */
	base = base + commsize;
	phys = phys + commsize;
	/*
	 *	Place the Printf buffer area after the Fast I/O comm area.
	 */
	dev->printfbuf = (void *)base;
	init->printfbuf = cpu_to_le32(phys);
	init->printfbufsiz = cpu_to_le32(printfbufsiz);
	memset(base, 0, printfbufsiz);
	return 1;
}
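
/**
 *	aac_queue_init		-	initialize a host/adapter communication queue
 *	@dev: Adapter that owns the queue
 *	@q: Queue to initialize
 *	@mem: Producer/consumer index pair for this queue
 *	@qsize: Number of entries in the queue
 *
 *	Resets the queue bookkeeping, initializes the wait queues and the
 *	queue lock, and points the producer/consumer headers at @mem with
 *	both indexes set to @qsize (an empty queue).
 */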
static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
{
	atomic_set(&q->numpending, 0);
	q->dev = dev;
	init_waitqueue_head(&q->cmdready);
	INIT_LIST_HEAD(&q->cmdq);
	init_waitqueue_head(&q->qfull);
	spin_lock_init(&q->lockdata);
	q->lock = &q->lockdata;
	q->headers.producer = (__le32 *)mem;
	q->headers.consumer = (__le32 *)(mem+1);
	*(q->headers.producer) = cpu_to_le32(qsize);
	*(q->headers.consumer) = cpu_to_le32(qsize);
	q->entries = qsize;
}
/**
 *	aac_send_shutdown		-	shutdown an adapter
 *	@dev: Adapter to shutdown
 *
 *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
 */

int aac_send_shutdown(struct aac_dev * dev)
{
	struct fib * fibctx;
	struct aac_close *cmd;
	int status;

	fibctx = aac_fib_alloc(dev);
	if (!fibctx)
		return -ENOMEM;
	aac_fib_init(fibctx);

	cmd = (struct aac_close *) fib_data(fibctx);

	cmd->command = cpu_to_le32(VM_CloseAll);
	cmd->cid = cpu_to_le32(0xfffffffe);

	status = aac_fib_send(ContainerCommand,
			  fibctx,
			  sizeof(struct aac_close),
			  FsaNormal,
			  -2 /* Timeout silently */, 1,
			  NULL, NULL);

	if (status >= 0)
		aac_fib_complete(fibctx);
	/* FIB should be freed only after getting the response from the F/W */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibctx);
	dev->adapter_shutdown = 1;
	if ((dev->pdev->device == PMC_DEVICE_S7 ||
	     dev->pdev->device == PMC_DEVICE_S8 ||
	     dev->pdev->device == PMC_DEVICE_S9) &&
	     dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
	return status;
}
/**
 *	aac_comm_init	-	Initialise FSA data structures
 *	@dev:	Adapter to initialise
 *
 *	Initializes the data structures that are required for the FSA communication
 *	interface to operate.
 *	Returns
 *		0 - if the communication interface was initialized successfully.
 *		A negative errno if there were errors initializing. This is a fatal error.
 */

static int aac_comm_init(struct aac_dev * dev)
{
	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
	u32 *headers;
	struct aac_entry * queues;
	unsigned long size;
	struct aac_queue_block * comm = dev->queues;
	/*
	 *	Now allocate and initialize the zone structures used as our
	 *	pool of FIB context records.  The size of the zone is based
	 *	on the system memory size.  We also initialize the mutex used
	 *	to protect the zone.
	 */
	spin_lock_init(&dev->fib_lock);

	/*
	 *	Allocate the physically contiguous space for the communication
	 *	queue headers.
	 */

	size = hdrsize + queuesize;

	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
		return -ENOMEM;

	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
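
	/*
	 * The region returned by aac_alloc_comm() above holds the
	 * producer/consumer index pairs first (two u32 per queue, hdrsize
	 * bytes total) with the queue entries immediately after.  Each
	 * aac_queue_init() call below consumes one index pair (headers += 2)
	 * and a slice of the entry array.
	 */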
	/* Adapter to Host normal priority Command queue */
	comm->queue[HostNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
	queues += HOST_NORM_CMD_ENTRIES;
	headers += 2;

	/* Adapter to Host high priority command queue */
	comm->queue[HostHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
	queues += HOST_HIGH_CMD_ENTRIES;
	headers += 2;

	/* Host to adapter normal priority command queue */
	comm->queue[AdapNormCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
	queues += ADAP_NORM_CMD_ENTRIES;
	headers += 2;

	/* host to adapter high priority command queue */
	comm->queue[AdapHighCmdQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
	queues += ADAP_HIGH_CMD_ENTRIES;
	headers += 2;

	/* adapter to host normal priority response queue */
	comm->queue[HostNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
	queues += HOST_NORM_RESP_ENTRIES;
	headers += 2;

	/* adapter to host high priority response queue */
	comm->queue[HostHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
	queues += HOST_HIGH_RESP_ENTRIES;
	headers += 2;

	/* host to adapter normal priority response queue */
	comm->queue[AdapNormRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
	queues += ADAP_NORM_RESP_ENTRIES;
	headers += 2;

	/* host to adapter high priority response queue */
	comm->queue[AdapHighRespQueue].base = queues;
	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
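
	/*
	 * Each adapter-bound queue shares its lock with its paired host-bound
	 * queue (pairing inferred from the assignments below), so command
	 * submission and processing of the matching responses are serialized
	 * by the same spinlock.
	 */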
	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;

	return 0;
}
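
/**
 *	aac_define_int_mode	-	select the adapter interrupt mode
 *	@dev: Adapter to configure
 *
 *	Chooses between MSI-X, MSI and legacy INTx, in that order of
 *	preference, capping the vector count at the number of online CPUs
 *	and at AAC_MAX_MSIX, and falling back one step whenever enabling the
 *	preferred mode fails.  Also computes vector_cap, the number of
 *	outstanding FIBs each vector is expected to service.
 */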
void aac_define_int_mode(struct aac_dev *dev)
{
	int i, msi_count, min_msix;

	msi_count = i = 0;
	/* max. vectors from GET_COMM_PREFERRED_SETTINGS */
	if (dev->max_msix == 0 ||
	    dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->sync_mode) {
		dev->max_msix = 1;
		dev->vector_cap =
			dev->scsi_host_ptr->can_queue +
			AAC_NUM_MGT_FIB;
		return;
	}

	/* Don't bother allocating more MSI-X vectors than cpus */
	msi_count = min(dev->max_msix,
		(unsigned int)num_online_cpus());

	dev->max_msix = msi_count;

	if (msi_count > AAC_MAX_MSIX)
		msi_count = AAC_MAX_MSIX;

	for (i = 0; i < msi_count; i++)
		dev->msixentry[i].entry = i;

	if (msi_count > 1 &&
	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
		min_msix = 2;
		i = pci_enable_msix_range(dev->pdev,
					  dev->msixentry,
					  min_msix,
					  msi_count);
		if (i > 0) {
			dev->msi_enabled = 1;
			msi_count = i;
		} else {
			dev->msi_enabled = 0;
			printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
					dev->name, dev->id, i);
		}
	}

	if (!dev->msi_enabled) {
		msi_count = 1;
		i = pci_enable_msi(dev->pdev);

		if (!i)
			dev->msi_enabled = 1;
		else
			printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
					dev->name, dev->id, i);
	}

	if (!dev->msi_enabled)
		dev->max_msix = msi_count = 1;
	else {
		if (dev->max_msix > msi_count)
			dev->max_msix = msi_count;
	}
	dev->vector_cap =
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
		msi_count;
}
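
/**
 *	aac_init_adapter	-	initialize the adapter communication interface
 *	@dev: Adapter to initialize
 *
 *	Queries GET_ADAPTER_PROPERTIES and GET_COMM_PREFERRED_SETTINGS to
 *	pick a comm interface and size the FIB, scatter-gather and
 *	queue-depth limits, applies the acbsize/numacb overrides, selects
 *	the interrupt mode, and then builds the comm queues and FIB pool.
 *	Returns the adapter on success or NULL if an allocation or mapping
 *	fails.
 */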
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;
	extern int aac_sync_mode;

	/*
	 *	Check the preferred comm settings, defaults from template.
	 */
	dev->management_fib_count = 0;
	spin_lock_init(&dev->manage_lock);
	spin_lock_init(&dev->sync_lock);
	spin_lock_init(&dev->iq_lock);
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
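	/*
	 * The default scatter-gather limit above is simply how many sgentry
	 * elements still fit in one hw_fib once the FIB header and a write
	 * command (which already embeds one sgentry) are subtracted.
	 */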
	dev->comm_interface = AAC_COMM_PRODUCER;
	dev->raw_io_interface = dev->raw_io_64 = 0;
	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0,
		status+0, status+1, status+2, status+3, NULL)) &&
		(status[0] == 0x00000001)) {
		dev->doorbell_mask = status[3];
		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
			dev->raw_io_64 = 1;
		dev->sync_mode = aac_sync_mode;
		if (dev->a_ops.adapter_comm &&
			(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
			dev->comm_interface = AAC_COMM_MESSAGE;
			dev->raw_io_interface = 1;
			if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
				/* driver supports TYPE1 (Tupelo) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
			} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
				/* driver supports TYPE2 (Denali) */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
			} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
				  (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
				/* driver doesn't support TYPE3 and TYPE4 */
				/* switch to sync. mode */
				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
				dev->sync_mode = 1;
			}
		}
		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
		    (status[2] > dev->base_size)) {
			aac_adapter_ioremap(dev, 0);
			dev->base_size = status[2];
			if (aac_adapter_ioremap(dev, status[2])) {
				/* remap failed, go back ... */
				dev->comm_interface = AAC_COMM_PRODUCER;
				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
					printk(KERN_WARNING
					  "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	dev->max_msix = 0;
	dev->msi_enabled = 0;
	dev->adapter_shutdown = 0;
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
	  0, 0, 0, 0, 0, 0,
	  status+0, status+1, status+2, status+3, status+4))
	 && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16		maximum command size in KB
		 *	status[1] & 0xFFFF	maximum FIB size
		 *	status[2] >> 16		maximum SG elements to driver
		 *	status[2] & 0xFFFF	maximum SG elements from driver
		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		/* Multiple of 32 for PMC */
		dev->max_fib_size = status[1] & 0xFFE0;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		if (dev->pdev->device == PMC_DEVICE_S7 ||
		    dev->pdev->device == PMC_DEVICE_S8 ||
		    dev->pdev->device == PMC_DEVICE_S9)
			host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
				(status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
		else
			host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
		dev->max_num_aif = status[4] & 0xFFFF;
	}
	/*
	 *	All these overrides are based on a fixed internal
	 *	knowledge and understanding of existing adapters,
	 *	acbsize should be set with caution.
	 */
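	/*
	 * acbsize and numacb are assumed to be module parameters defined
	 * elsewhere in the driver; when set, they override the
	 * firmware-reported FIB size and outstanding-command limits chosen
	 * above.
	 */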
	if (acbsize == 512) {
		host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
		dev->max_fib_size = 512;
		dev->sg_tablesize = host->sg_tablesize
		  = (512 - sizeof(struct aac_fibhdr)
		    - sizeof(struct aac_write) + sizeof(struct sgentry))
		     / sizeof(struct sgentry);
		host->can_queue = AAC_NUM_IO_FIB;
	} else if (acbsize == 2048) {
		host->max_sectors = 512;
		dev->max_fib_size = 2048;
		host->sg_tablesize = 65;
		dev->sg_tablesize = 81;
		host->can_queue = 512 - AAC_NUM_MGT_FIB;
	} else if (acbsize == 4096) {
		host->max_sectors = 1024;
		dev->max_fib_size = 4096;
		host->sg_tablesize = 129;
		dev->sg_tablesize = 166;
		host->can_queue = 256 - AAC_NUM_MGT_FIB;
	} else if (acbsize == 8192) {
		host->max_sectors = 2048;
		dev->max_fib_size = 8192;
		host->sg_tablesize = 257;
		dev->sg_tablesize = 337;
		host->can_queue = 128 - AAC_NUM_MGT_FIB;
	} else if (acbsize > 0) {
		printk("Illegal acbsize=%d ignored\n", acbsize);
	}

	if (numacb > 0) {
		if (numacb < host->can_queue)
			host->can_queue = numacb;
		else
			printk("numacb=%d ignored\n", numacb);
	}
	if (host->can_queue > AAC_NUM_IO_FIB)
		host->can_queue = AAC_NUM_IO_FIB;

	if (dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->pdev->device == PMC_DEVICE_S7 ||
	    dev->pdev->device == PMC_DEVICE_S8 ||
	    dev->pdev->device == PMC_DEVICE_S9)
		aac_define_int_mode(dev);
	/*
	 *	Ok now init the communication subsystem
	 */
	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
	if (dev->queues == NULL) {
		printk(KERN_ERR "Error could not allocate comm region.\n");
		return NULL;
	}

	if (aac_comm_init(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}
	/*
	 *	Initialize the list of fibs
	 */
	if (aac_fib_setup(dev) < 0) {
		kfree(dev->queues);
		return NULL;
	}

	INIT_LIST_HEAD(&dev->fib_list);
	INIT_LIST_HEAD(&dev->sync_fib_list);