[linux-2.6/verdex.git] drivers/scsi/aic7xxx/aic79xx_osm.h
/*
 * Adaptec AIC79xx device driver for Linux.
 *
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.h#137 $
 */

#ifndef _AIC79XX_LINUX_H_
#define _AIC79XX_LINUX_H_
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/version.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/io.h>

#include <linux/interrupt.h> /* For tasklet support. */
#include <linux/config.h>
#include <linux/slab.h>
/* Core SCSI definitions */
#define AIC_LIB_PREFIX ahd
#include "scsi.h"
#include <scsi/scsi_host.h>

/* Name space conflict with BSD queue macros */
#ifdef LIST_HEAD
#undef LIST_HEAD
#endif

#include "cam.h"
#include "queue.h"
#include "scsi_message.h"
#include "scsi_iu.h"
#include "aiclib.h"
/*********************************** Debugging ********************************/
#ifdef CONFIG_AIC79XX_DEBUG_ENABLE
#ifdef CONFIG_AIC79XX_DEBUG_MASK
#define AHD_DEBUG 1
#define AHD_DEBUG_OPTS CONFIG_AIC79XX_DEBUG_MASK
#else
/*
 * Compile in debugging code, but do not enable any printfs.
 */
#define AHD_DEBUG 1
#define AHD_DEBUG_OPTS 0
#endif
/* No debugging code. */
#endif
/********************************** Misc Macros *******************************/
#define roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
#define powerof2(x)     ((((x)-1)&(x))==0)
/************************* Forward Declarations *******************************/
struct ahd_softc;
typedef struct pci_dev *ahd_dev_softc_t;
typedef Scsi_Cmnd      *ahd_io_ctx_t;
/******************************* Byte Order ***********************************/
#define ahd_htobe16(x)  cpu_to_be16(x)
#define ahd_htobe32(x)  cpu_to_be32(x)
#define ahd_htobe64(x)  cpu_to_be64(x)
#define ahd_htole16(x)  cpu_to_le16(x)
#define ahd_htole32(x)  cpu_to_le32(x)
#define ahd_htole64(x)  cpu_to_le64(x)

#define ahd_be16toh(x)  be16_to_cpu(x)
#define ahd_be32toh(x)  be32_to_cpu(x)
#define ahd_be64toh(x)  be64_to_cpu(x)
#define ahd_le16toh(x)  le16_to_cpu(x)
#define ahd_le32toh(x)  le32_to_cpu(x)
#define ahd_le64toh(x)  le64_to_cpu(x)
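
/*
 * Editorial usage sketch (not part of the original header): descriptors
 * shared with the controller are little-endian, so callers convert on the
 * way in and out with the wrappers above.  The "sg" element and its
 * "addr"/"len" fields below are illustrative only; the real S/G layout
 * lives in aic79xx.h.
 *
 *      sg->addr = ahd_htole32(busaddr);
 *      sg->len  = ahd_htole32(xfer_len);
 *      ...
 *      resid    = ahd_le32toh(sg->len);
 */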
/************************* Configuration Data *********************************/
extern uint32_t aic79xx_allow_memio;
extern int aic79xx_detect_complete;
extern Scsi_Host_Template aic79xx_driver_template;
/***************************** Bus Space/DMA **********************************/

typedef uint32_t bus_size_t;

typedef enum {
        BUS_SPACE_MEMIO,
        BUS_SPACE_PIO
} bus_space_tag_t;

typedef union {
        u_long                    ioport;
        volatile uint8_t __iomem *maddr;
} bus_space_handle_t;

typedef struct bus_dma_segment
{
        dma_addr_t      ds_addr;
        bus_size_t      ds_len;
} bus_dma_segment_t;

struct ahd_linux_dma_tag
{
        bus_size_t      alignment;
        bus_size_t      boundary;
        bus_size_t      maxsize;
};
typedef struct ahd_linux_dma_tag* bus_dma_tag_t;

struct ahd_linux_dmamap
{
        dma_addr_t      bus_addr;
};
typedef struct ahd_linux_dmamap* bus_dmamap_t;

typedef int bus_dma_filter_t(void*, dma_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);

#define BUS_DMA_WAITOK          0x0
#define BUS_DMA_NOWAIT          0x1
#define BUS_DMA_ALLOCNOW        0x2
#define BUS_DMA_LOAD_SEGS       0x4     /*
                                         * Argument is an S/G list not
                                         * a single buffer.
                                         */

#define BUS_SPACE_MAXADDR       0xFFFFFFFF
#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFF
#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFF
int     ahd_dma_tag_create(struct ahd_softc *, bus_dma_tag_t /*parent*/,
                           bus_size_t /*alignment*/, bus_size_t /*boundary*/,
                           dma_addr_t /*lowaddr*/, dma_addr_t /*highaddr*/,
                           bus_dma_filter_t*/*filter*/, void */*filterarg*/,
                           bus_size_t /*maxsize*/, int /*nsegments*/,
                           bus_size_t /*maxsegsz*/, int /*flags*/,
                           bus_dma_tag_t */*dma_tagp*/);

void    ahd_dma_tag_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/);

int     ahd_dmamem_alloc(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
                         void** /*vaddr*/, int /*flags*/,
                         bus_dmamap_t* /*mapp*/);

void    ahd_dmamem_free(struct ahd_softc *, bus_dma_tag_t /*dmat*/,
                        void* /*vaddr*/, bus_dmamap_t /*map*/);

void    ahd_dmamap_destroy(struct ahd_softc *, bus_dma_tag_t /*tag*/,
                           bus_dmamap_t /*map*/);

int     ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t /*dmat*/,
                        bus_dmamap_t /*map*/, void * /*buf*/,
                        bus_size_t /*buflen*/, bus_dmamap_callback_t *,
                        void */*callback_arg*/, int /*flags*/);

int     ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
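
/*
 * Editorial usage sketch (not part of the original header): these wrappers
 * emulate the FreeBSD busdma interface, so the typical call sequence for a
 * single coherent buffer looks roughly like the fragment below.  The
 * variables "parent_tag", "size", "mycb" and "cb_arg" are illustrative
 * placeholders, not names defined by this driver.
 *
 *      bus_dma_tag_t  dmat;
 *      bus_dmamap_t   map;
 *      void          *vaddr;
 *
 *      ahd_dma_tag_create(ahd, parent_tag, 1, 0,
 *                         BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *                         NULL, NULL, size, 1, size, 0, &dmat);
 *      ahd_dmamem_alloc(ahd, dmat, &vaddr, BUS_DMA_NOWAIT, &map);
 *      ahd_dmamap_load(ahd, dmat, map, vaddr, size, mycb, cb_arg, 0);
 *      ...
 *      ahd_dmamap_unload(ahd, dmat, map);
 *      ahd_dmamem_free(ahd, dmat, vaddr, map);
 *      ahd_dma_tag_destroy(ahd, dmat);
 */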
/*
 * Operations performed by ahd_dmamap_sync().
 */
#define BUS_DMASYNC_PREREAD     0x01    /* pre-read synchronization */
#define BUS_DMASYNC_POSTREAD    0x02    /* post-read synchronization */
#define BUS_DMASYNC_PREWRITE    0x04    /* pre-write synchronization */
#define BUS_DMASYNC_POSTWRITE   0x08    /* post-write synchronization */

/*
 * XXX
 * ahd_dmamap_sync is only used on buffers allocated with
 * the pci_alloc_consistent() API.  Although I'm not sure how
 * this works on architectures with a write buffer, Linux does
 * not have an API to sync "coherent" memory.  Perhaps we need
 * to do an mb()?
 */
#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
/************************** Timer DataStructures ******************************/
typedef struct timer_list ahd_timer_t;

/********************************** Includes **********************************/
#ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
#define AIC_DEBUG_REGISTERS 1
#else
#define AIC_DEBUG_REGISTERS 0
#endif
#include "aic79xx.h"
/***************************** Timer Facilities *******************************/
#define ahd_timer_init init_timer
#define ahd_timer_stop del_timer_sync
typedef void ahd_linux_callback_t (u_long);
static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec,
                                     ahd_callback_t *func, void *arg);
static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);

static __inline void
ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg)
{
        struct ahd_softc *ahd;

        ahd = (struct ahd_softc *)arg;
        del_timer(timer);
        timer->data = (u_long)arg;
        timer->expires = jiffies + (usec * HZ)/1000000;
        timer->function = (ahd_linux_callback_t*)func;
        add_timer(timer);
}

static __inline void
ahd_scb_timer_reset(struct scb *scb, u_int usec)
{
        mod_timer(&scb->io_ctx->eh_timeout, jiffies + (usec * HZ)/1000000);
}
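
/*
 * Editorial usage sketch (not part of the original header): the expiry
 * calculation above converts microseconds to jiffies, so re-arming a one
 * second timer looks roughly like the call below.  "some_timer" and
 * "my_callback" are illustrative names; the callback must have the
 * ahd_callback_t signature and receives the "arg" pointer stored in
 * timer->data.
 *
 *      ahd_timer_reset(&some_timer, 1000000, my_callback, ahd);
 */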
/***************************** SMP support ************************************/
#include <linux/spinlock.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHD_SCSI_HAS_HOST_LOCK 1
#else
#define AHD_SCSI_HAS_HOST_LOCK 0
#endif

#define AIC79XX_DRIVER_VERSION "1.3.11"
/**************************** Front End Queues ********************************/
/*
 * Data structure used to cast the Linux struct scsi_cmnd to something
 * that allows us to use the queue macros.  The Linux structure has
 * plenty of space to hold the links fields as required by the queue
 * macros, but the queue macros require them to have the correct type.
 */
struct ahd_cmd_internal {
        /* Area owned by the Linux scsi layer. */
        uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
        union {
                STAILQ_ENTRY(ahd_cmd)   ste;
                LIST_ENTRY(ahd_cmd)     le;
                TAILQ_ENTRY(ahd_cmd)    tqe;
        } links;
        uint32_t end;
};

struct ahd_cmd {
        union {
                struct ahd_cmd_internal icmd;
                struct scsi_cmnd        scsi_cmd;
        } un;
};

#define acmd_icmd(cmd) ((cmd)->un.icmd)
#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
#define acmd_links un.icmd.links
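
/*
 * Editorial usage sketch (not part of the original header): the driver
 * overlays struct ahd_cmd on the struct scsi_cmnd handed in by the
 * midlayer and then links it with the BSD queue macros, roughly:
 *
 *      struct ahd_cmd *acmd = (struct ahd_cmd *)cmd;
 *      TAILQ_INSERT_TAIL(&dev->busyq, acmd, acmd_links.tqe);
 *      ...
 *      acmd = TAILQ_FIRST(&dev->busyq);
 *      TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
 *
 * Here "cmd" is the incoming Scsi_Cmnd pointer and "dev" a
 * struct ahd_linux_device (declared below); acmd_links resolves to
 * un.icmd.links so the queue macros see correctly typed link fields.
 */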
/*************************** Device Data Structures ***************************/
/*
 * A per probed device structure used to deal with some error recovery
 * scenarios that the Linux mid-layer code just doesn't know how to
 * handle.  The structure allocated for a device only becomes persistent
 * after a successfully completed inquiry command to the target when
 * that inquiry data indicates a lun is present.
 */
TAILQ_HEAD(ahd_busyq, ahd_cmd);
typedef enum {
        AHD_DEV_UNCONFIGURED     = 0x01,
        AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
        AHD_DEV_TIMER_ACTIVE     = 0x04, /* Our timer is active */
        AHD_DEV_ON_RUN_LIST      = 0x08, /* Queued to be run later */
        AHD_DEV_Q_BASIC          = 0x10, /* Allow basic device queuing */
        AHD_DEV_Q_TAGGED         = 0x20, /* Allow full SCSI2 command queueing */
        AHD_DEV_PERIODIC_OTAG    = 0x40, /* Send OTAG to prevent starvation */
        AHD_DEV_SLAVE_CONFIGURED = 0x80  /* slave_configure() has been called */
} ahd_linux_dev_flags;

struct ahd_linux_target;
struct ahd_linux_device {
        TAILQ_ENTRY(ahd_linux_device) links;
        struct ahd_busyq busyq;

        /*
         * The number of transactions currently
         * queued to the device.
         */
        int active;

        /*
         * The currently allowed number of
         * transactions that can be queued to
         * the device.  Must be signed for
         * conversion from tagged to untagged
         * mode where the device may have more
         * than one outstanding active transaction.
         */
        int openings;

        /*
         * A positive count indicates that this
         * device's queue is halted.
         */
        u_int qfrozen;

        /*
         * Cumulative command counter.
         */
        u_long commands_issued;

        /*
         * The number of tagged transactions when
         * running at our current opening level
         * that have been successfully received by
         * this device since the last QUEUE FULL.
         */
        u_int tag_success_count;
#define AHD_TAG_SUCCESS_INTERVAL 50

        ahd_linux_dev_flags flags;

        /*
         * Per device timer.
         */
        struct timer_list timer;

        /*
         * The high limit for the tags variable.
         */
        u_int maxtags;

        /*
         * The computed number of tags outstanding
         * at the time of the last QUEUE FULL event.
         */
        u_int tags_on_last_queuefull;

        /*
         * How many times we have seen a queue full
         * with the same number of tags.  This is used
         * to stop our adaptive queue depth algorithm
         * on devices with a fixed number of tags.
         */
        u_int last_queuefull_same_count;
#define AHD_LOCK_TAGS_COUNT 50

        /*
         * How many transactions have been queued
         * without the device going idle.  We use
         * this statistic to determine when to issue
         * an ordered tag to prevent transaction
         * starvation.  This statistic is only updated
         * if the AHD_DEV_PERIODIC_OTAG flag is set
         * on this device.
         */
        u_int commands_since_idle_or_otag;
#define AHD_OTAG_THRESH 500

        int lun;
        Scsi_Device *scsi_device;
        struct ahd_linux_target *target;
};
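
/*
 * Editorial sketch (not part of the original header; simplified relative
 * to the QUEUE FULL handling in aic79xx_osm.c): the fields above cooperate
 * roughly as follows when a device reports QUEUE FULL.
 *
 *      dev->tag_success_count = 0;
 *      if (dev->active == dev->tags_on_last_queuefull) {
 *              if (++dev->last_queuefull_same_count == AHD_LOCK_TAGS_COUNT)
 *                      dev->maxtags = dev->active;
 *      } else {
 *              dev->tags_on_last_queuefull = dev->active;
 *              dev->last_queuefull_same_count = 0;
 *      }
 *      dev->openings = dev->active ? dev->active - 1 : 1;
 *
 * Every AHD_TAG_SUCCESS_INTERVAL successfully completed tagged commands
 * then let openings creep back toward maxtags, and
 * commands_since_idle_or_otag crossing AHD_OTAG_THRESH triggers an
 * ordered tag when AHD_DEV_PERIODIC_OTAG is set.
 */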
typedef enum {
        AHD_DV_REQUIRED  = 0x01,
        AHD_INQ_VALID    = 0x02,
        AHD_BASIC_DV     = 0x04,
        AHD_ENHANCED_DV  = 0x08
} ahd_linux_targ_flags;

/* DV States */
typedef enum {
        AHD_DV_STATE_EXIT = 0,
        AHD_DV_STATE_INQ_SHORT_ASYNC,
        AHD_DV_STATE_INQ_ASYNC,
        AHD_DV_STATE_INQ_ASYNC_VERIFY,
        AHD_DV_STATE_TUR,
        AHD_DV_STATE_REBD,
        AHD_DV_STATE_INQ_VERIFY,
        AHD_DV_STATE_WEB,
        AHD_DV_STATE_REB,
        AHD_DV_STATE_SU,
        AHD_DV_STATE_BUSY
} ahd_dv_state;

struct ahd_linux_target {
        struct ahd_linux_device  *devices[AHD_NUM_LUNS];
        int                       channel;
        int                       target;
        int                       refcount;
        struct ahd_transinfo      last_tinfo;
        struct ahd_softc         *ahd;
        ahd_linux_targ_flags      flags;
        struct scsi_inquiry_data *inq_data;
        /*
         * The next "fallback" period to use for narrow/wide transfers.
         */
        uint8_t                   dv_next_narrow_period;
        uint8_t                   dv_next_wide_period;
        uint8_t                   dv_max_width;
        uint8_t                   dv_max_ppr_options;
        uint8_t                   dv_last_ppr_options;
        u_int                     dv_echo_size;
        ahd_dv_state              dv_state;
        u_int                     dv_state_retry;
        uint8_t                  *dv_buffer;
        uint8_t                  *dv_buffer1;
        /*
         * Cumulative counter of errors.
         */
        u_long                    errors_detected;
        u_long                    cmds_since_error;
};
/********************* Definitions Required by the Core ***********************/
/*
 * Number of SG segments we require.  So long as the S/G segments for
 * a particular transaction are allocated in a physically contiguous
 * manner and are allocated below 4GB, the number of S/G segments is
 * unrestricted.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
 * We dynamically adjust the number of segments in pre-2.5 kernels to
 * avoid fragmentation issues in the SCSI mid-layer's private memory
 * allocator.  See aic79xx_osm.c ahd_linux_size_nseg() for details.
 */
extern u_int ahd_linux_nseg;
#define AHD_NSEG ahd_linux_nseg
#define AHD_LINUX_MIN_NSEG 64
#else
#define AHD_NSEG 128
#endif
/*
 * Per-SCB OSM storage.
 */
typedef enum {
        AHD_SCB_UP_EH_SEM = 0x1
} ahd_linux_scb_flags;

struct scb_platform_data {
        struct ahd_linux_device *dev;
        dma_addr_t               buf_busaddr;
        uint32_t                 xfer_len;
        uint32_t                 sense_resid;   /* Auto-Sense residual */
        ahd_linux_scb_flags      flags;
};
/*
 * Define a structure used for each host adapter.  All members are
 * aligned on a boundary >= the size of the member to honor the
 * alignment restrictions of the various platforms supported by
 * this driver.
 */
typedef enum {
        AHD_DV_WAIT_SIMQ_EMPTY   = 0x01,
        AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
        AHD_DV_ACTIVE            = 0x04,
        AHD_DV_SHUTDOWN          = 0x08,
        AHD_RUN_CMPLT_Q_TIMER    = 0x10
} ahd_linux_softc_flags;

TAILQ_HEAD(ahd_completeq, ahd_cmd);

struct ahd_platform_data {
        /*
         * Fields accessed from interrupt context.
         */
        struct ahd_linux_target *targets[AHD_NUM_TARGETS];
        TAILQ_HEAD(, ahd_linux_device) device_runq;
        struct ahd_completeq     completeq;

        spinlock_t               spin_lock;
        struct tasklet_struct    runq_tasklet;
        u_int                    qfrozen;
        pid_t                    dv_pid;
        struct timer_list        completeq_timer;
        struct timer_list        reset_timer;
        struct timer_list        stats_timer;
        struct semaphore         eh_sem;
        struct semaphore         dv_sem;
        struct semaphore         dv_cmd_sem;    /* XXX This needs to be in
                                                 * the target struct
                                                 */
        struct scsi_device      *dv_scsi_dev;
        struct Scsi_Host        *host;          /* pointer to scsi host */
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
        uint32_t                 irq;           /* IRQ for this adapter */
        uint32_t                 bios_address;
        uint32_t                 mem_busaddr;   /* Mem Base Addr */
        uint64_t                 hw_dma_mask;
        ahd_linux_softc_flags    flags;
};
/************************** OS Utility Wrappers *******************************/
#define printf printk
#define M_NOWAIT GFP_ATOMIC
#define M_WAITOK 0
#define malloc(size, type, flags) kmalloc(size, flags)
#define free(ptr, type) kfree(ptr)

static __inline void ahd_delay(long);
static __inline void
ahd_delay(long usec)
{
        /*
         * udelay on Linux can have problems for
         * multi-millisecond waits.  Wait at most
         * 1024us per call.
         */
        while (usec > 0) {
                udelay(usec % 1024);
                usec -= 1024;
        }
}
/***************************** Low Level I/O **********************************/
static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
                                     long port, uint16_t val);
static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
                               uint8_t *, int count);
static __inline void ahd_insb(struct ahd_softc * ahd, long port,
                              uint8_t *, int count);

static __inline uint8_t
ahd_inb(struct ahd_softc * ahd, long port)
{
        uint8_t x;

        if (ahd->tags[0] == BUS_SPACE_MEMIO) {
                x = readb(ahd->bshs[0].maddr + port);
        } else {
                x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
        }
        mb();
        return (x);
}

static __inline uint16_t
ahd_inw_atomic(struct ahd_softc * ahd, long port)
{
        uint16_t x;

        if (ahd->tags[0] == BUS_SPACE_MEMIO) {
                x = readw(ahd->bshs[0].maddr + port);
        } else {
                x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
        }
        mb();
        return (x);
}

static __inline void
ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
{
        if (ahd->tags[0] == BUS_SPACE_MEMIO) {
                writeb(val, ahd->bshs[0].maddr + port);
        } else {
                outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
        }
        mb();
}

static __inline void
ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
{
        if (ahd->tags[0] == BUS_SPACE_MEMIO) {
                writew(val, ahd->bshs[0].maddr + port);
        } else {
                outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
        }
        mb();
}

static __inline void
ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
        int i;

        /*
         * There is probably a more efficient way to do this on Linux
         * but we don't use this for anything speed critical and this
         * should work.
         */
        for (i = 0; i < count; i++)
                ahd_outb(ahd, port, *array++);
}

static __inline void
ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
{
        int i;

        /*
         * There is probably a more efficient way to do this on Linux
         * but we don't use this for anything speed critical and this
         * should work.
         */
        for (i = 0; i < count; i++)
                *array++ = ahd_inb(ahd, port);
}
/**************************** Initialization **********************************/
int             ahd_linux_register_host(struct ahd_softc *,
                                        Scsi_Host_Template *);

uint64_t        ahd_linux_get_memsize(void);

/*************************** Pretty Printing **********************************/
struct info_str {
        char *buffer;
        int length;
        off_t offset;
        int pos;
};

void    ahd_format_transinfo(struct info_str *info,
                             struct ahd_transinfo *tinfo);
/******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahd_lockinit(struct ahd_softc *);
static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);

/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
                                                  unsigned long *flags);
static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
                                                    unsigned long *flags);
/* Lock held during command completion to the upper layer */
static __inline void ahd_done_lockinit(struct ahd_softc *);
static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);

/* Lock held during ahd_list manipulation and ahd softc frees */
extern spinlock_t ahd_list_spinlock;
static __inline void ahd_list_lockinit(void);
static __inline void ahd_list_lock(unsigned long *flags);
static __inline void ahd_list_unlock(unsigned long *flags);

static __inline void
ahd_lockinit(struct ahd_softc *ahd)
{
        spin_lock_init(&ahd->platform_data->spin_lock);
}

static __inline void
ahd_lock(struct ahd_softc *ahd, unsigned long *flags)
{
        spin_lock_irqsave(&ahd->platform_data->spin_lock, *flags);
}

static __inline void
ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
        spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
}

static __inline void
ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
{
        /*
         * In 2.5.X and some 2.4.X versions, the midlayer takes our
         * lock just before calling us, so we avoid locking again.
         * For other kernel versions, the io_request_lock is taken
         * just before our entry point is called.  In this case, we
         * trade the io_request_lock for our per-softc lock.
         */
#if AHD_SCSI_HAS_HOST_LOCK == 0
        spin_unlock(&io_request_lock);
        spin_lock(&ahd->platform_data->spin_lock);
#endif
}

static __inline void
ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
        spin_unlock(&ahd->platform_data->spin_lock);
        spin_lock(&io_request_lock);
#endif
}

static __inline void
ahd_done_lockinit(struct ahd_softc *ahd)
{
        /*
         * In 2.5.X, our own lock is held during completions.
         * In previous versions, the io_request_lock is used.
         * In either case, we can't initialize this lock again.
         */
}

static __inline void
ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
        spin_lock(&io_request_lock);
#endif
}

static __inline void
ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
        spin_unlock(&io_request_lock);
#endif
}

static __inline void
ahd_list_lockinit(void)
{
        spin_lock_init(&ahd_list_spinlock);
}

static __inline void
ahd_list_lock(unsigned long *flags)
{
        spin_lock_irqsave(&ahd_list_spinlock, *flags);
}

static __inline void
ahd_list_unlock(unsigned long *flags)
{
        spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
}
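
/*
 * Editorial usage sketch (not part of the original header): code running
 * outside an entry point that already holds the host lock brackets
 * controller access with the irqsave pair, e.g.:
 *
 *      unsigned long flags;
 *
 *      ahd_lock(ahd, &flags);
 *      ahd_outb(ahd, SCSISEQ0, 0);
 *      ahd_unlock(ahd, &flags);
 *
 * SCSISEQ0 is only an example register name here; the same "flags"
 * variable must be passed to both calls since it carries the saved
 * interrupt state.
 */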
/******************************* PCI Definitions ******************************/
/*
 * PCIM_xxx: mask to locate subfield in register
 * PCIR_xxx: config register offset
 * PCIC_xxx: device class
 * PCIS_xxx: device subclass
 * PCIP_xxx: device programming interface
 * PCIV_xxx: PCI vendor ID (only required to fixup ancient devices)
 * PCID_xxx: device ID
 */
#define PCIR_DEVVENDOR          0x00
#define PCIR_VENDOR             0x00
#define PCIR_DEVICE             0x02
#define PCIR_COMMAND            0x04
#define PCIM_CMD_PORTEN         0x0001
#define PCIM_CMD_MEMEN          0x0002
#define PCIM_CMD_BUSMASTEREN    0x0004
#define PCIM_CMD_MWRICEN        0x0010
#define PCIM_CMD_PERRESPEN      0x0040
#define PCIM_CMD_SERRESPEN      0x0100
#define PCIR_STATUS             0x06
#define PCIR_REVID              0x08
#define PCIR_PROGIF             0x09
#define PCIR_SUBCLASS           0x0a
#define PCIR_CLASS              0x0b
#define PCIR_CACHELNSZ          0x0c
#define PCIR_LATTIMER           0x0d
#define PCIR_HEADERTYPE         0x0e
#define PCIM_MFDEV              0x80
#define PCIR_BIST               0x0f
#define PCIR_CAP_PTR            0x34

/* config registers for header type 0 devices */
#define PCIR_MAPS       0x10
#define PCIR_SUBVEND_0  0x2c
#define PCIR_SUBDEV_0   0x2e

/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND   0x96
#define PCIXR_DEVADDR   0x98
#define PCIXM_DEVADDR_FNUM      0x0003  /* Function Number */
#define PCIXM_DEVADDR_DNUM      0x00F8  /* Device Number */
#define PCIXM_DEVADDR_BNUM      0xFF00  /* Bus Number */
#define PCIXR_STATUS    0x9A
#define PCIXM_STATUS_64BIT      0x0001  /* Active 64bit connection to device. */
#define PCIXM_STATUS_133CAP     0x0002  /* Device is 133MHz capable */
#define PCIXM_STATUS_SCDISC     0x0004  /* Split Completion Discarded */
#define PCIXM_STATUS_UNEXPSC    0x0008  /* Unexpected Split Completion */
#define PCIXM_STATUS_CMPLEXDEV  0x0010  /* Device Complexity (set == bridge) */
#define PCIXM_STATUS_MAXMRDBC   0x0060  /* Maximum Burst Read Count */
#define PCIXM_STATUS_MAXSPLITS  0x0380  /* Maximum Split Transactions */
#define PCIXM_STATUS_MAXCRDS    0x1C00  /* Maximum Cumulative Read Size */
#define PCIXM_STATUS_RCVDSCEM   0x2000  /* Received a Split Comp w/Error msg */

extern struct pci_driver aic79xx_pci_driver;

typedef enum
{
        AHD_POWER_STATE_D0,
        AHD_POWER_STATE_D1,
        AHD_POWER_STATE_D2,
        AHD_POWER_STATE_D3
} ahd_power_state;

void ahd_power_state_change(struct ahd_softc *ahd,
                            ahd_power_state new_state);

/******************************* PCI Routines *********************************/
int     ahd_linux_pci_init(void);
void    ahd_linux_pci_exit(void);
int     ahd_pci_map_registers(struct ahd_softc *ahd);
int     ahd_pci_map_int(struct ahd_softc *ahd);

static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
                                             int reg, int width);

static __inline uint32_t
ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
{
        switch (width) {
        case 1:
        {
                uint8_t retval;

                pci_read_config_byte(pci, reg, &retval);
                return (retval);
        }
        case 2:
        {
                uint16_t retval;

                pci_read_config_word(pci, reg, &retval);
                return (retval);
        }
        case 4:
        {
                uint32_t retval;

                pci_read_config_dword(pci, reg, &retval);
                return (retval);
        }
        default:
                panic("ahd_pci_read_config: Read size too big");
                /* NOTREACHED */
                return (0);
        }
}

static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
                                          int reg, uint32_t value,
                                          int width);

static __inline void
ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
{
        switch (width) {
        case 1:
                pci_write_config_byte(pci, reg, value);
                break;
        case 2:
                pci_write_config_word(pci, reg, value);
                break;
        case 4:
                pci_write_config_dword(pci, reg, value);
                break;
        default:
                panic("ahd_pci_write_config: Write size too big");
                /* NOTREACHED */
        }
}
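
/*
 * Editorial usage sketch (not part of the original header): enabling bus
 * mastering and memory space with the register and mask definitions above
 * would look roughly like the fragment below.  "pci" stands for whatever
 * ahd_dev_softc_t handle the caller holds; the width argument selects the
 * byte/word/dword config accessor.
 *
 *      uint32_t cmd;
 *
 *      cmd = ahd_pci_read_config(pci, PCIR_COMMAND, 2);
 *      cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
 *      ahd_pci_write_config(pci, PCIR_COMMAND, cmd, 2);
 */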

static __inline int ahd_get_pci_function(ahd_dev_softc_t);
static __inline int
ahd_get_pci_function(ahd_dev_softc_t pci)
{
        return (PCI_FUNC(pci->devfn));
}

static __inline int ahd_get_pci_slot(ahd_dev_softc_t);
static __inline int
ahd_get_pci_slot(ahd_dev_softc_t pci)
{
        return (PCI_SLOT(pci->devfn));
}

static __inline int ahd_get_pci_bus(ahd_dev_softc_t);
static __inline int
ahd_get_pci_bus(ahd_dev_softc_t pci)
{
        return (pci->bus->number);
}

static __inline void ahd_flush_device_writes(struct ahd_softc *);
static __inline void
ahd_flush_device_writes(struct ahd_softc *ahd)
{
        /* XXX Is this sufficient for all architectures??? */
        ahd_inb(ahd, INTSTAT);
}

/**************************** Proc FS Support *********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
int ahd_linux_proc_info(char *, char **, off_t, int, int, int);
#else
int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
                        off_t, int, int);
#endif

/*************************** Domain Validation ********************************/
#define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
#define AHD_DV_SIMQ_FROZEN(ahd)                                 \
        ((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0)   \
         && (ahd)->platform_data->qfrozen == 1)

/*********************** Transaction Access Wrappers **************************/
static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahd_get_transaction_status(struct scb *);
static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd);
static __inline uint32_t ahd_get_scsi_status(struct scb *);
static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
static __inline u_long ahd_get_transfer_length(struct scb *);
static __inline int ahd_get_transfer_dir(struct scb *);
static __inline void ahd_set_residual(struct scb *, u_long);
static __inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
static __inline u_long ahd_get_residual(struct scb *);
static __inline u_long ahd_get_sense_residual(struct scb *);
static __inline int ahd_perform_autosense(struct scb *);
static __inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
                                               struct scb *);
static __inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
                                                     struct ahd_devinfo *);
static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
                                           struct scb *scb);
static __inline void ahd_freeze_scb(struct scb *scb);

static __inline
void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
{
        cmd->result &= ~(CAM_STATUS_MASK << 16);
        cmd->result |= status << 16;
}

static __inline
void ahd_set_transaction_status(struct scb *scb, uint32_t status)
{
        ahd_cmd_set_transaction_status(scb->io_ctx, status);
}

static __inline
void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
{
        cmd->result &= ~0xFFFF;
        cmd->result |= status;
}

static __inline
void ahd_set_scsi_status(struct scb *scb, uint32_t status)
{
        ahd_cmd_set_scsi_status(scb->io_ctx, status);
}

static __inline
uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd)
{
        return ((cmd->result >> 16) & CAM_STATUS_MASK);
}

static __inline
uint32_t ahd_get_transaction_status(struct scb *scb)
{
        return (ahd_cmd_get_transaction_status(scb->io_ctx));
}

static __inline
uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd)
{
        return (cmd->result & 0xFFFF);
}

static __inline
uint32_t ahd_get_scsi_status(struct scb *scb)
{
        return (ahd_cmd_get_scsi_status(scb->io_ctx));
}

static __inline
void ahd_set_transaction_tag(struct scb *scb, int enabled, u_int type)
{
        /*
         * Nothing to do for linux as the incoming transaction
         * has no concept of tag/non tagged, etc.
         */
}

static __inline
u_long ahd_get_transfer_length(struct scb *scb)
{
        return (scb->platform_data->xfer_len);
}

static __inline
int ahd_get_transfer_dir(struct scb *scb)
{
        return (scb->io_ctx->sc_data_direction);
}

static __inline
void ahd_set_residual(struct scb *scb, u_long resid)
{
        scb->io_ctx->resid = resid;
}

static __inline
void ahd_set_sense_residual(struct scb *scb, u_long resid)
{
        scb->platform_data->sense_resid = resid;
}

static __inline
u_long ahd_get_residual(struct scb *scb)
{
        return (scb->io_ctx->resid);
}

static __inline
u_long ahd_get_sense_residual(struct scb *scb)
{
        return (scb->platform_data->sense_resid);
}

static __inline
int ahd_perform_autosense(struct scb *scb)
{
        /*
         * We always perform autosense in Linux.
         * On other platforms this is set on a
         * per-transaction basis.
         */
        return (1);
}

static __inline uint32_t
ahd_get_sense_bufsize(struct ahd_softc *ahd, struct scb *scb)
{
        return (sizeof(struct scsi_sense_data));
}

static __inline void
ahd_notify_xfer_settings_change(struct ahd_softc *ahd,
                                struct ahd_devinfo *devinfo)
{
        /* Nothing to do here for linux */
}

static __inline void
ahd_platform_scb_free(struct ahd_softc *ahd, struct scb *scb)
{
        ahd->flags &= ~AHD_RESOURCE_SHORTAGE;
}

int     ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg);
void    ahd_platform_free(struct ahd_softc *ahd);
void    ahd_platform_init(struct ahd_softc *ahd);
void    ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb);
void    ahd_freeze_simq(struct ahd_softc *ahd);
void    ahd_release_simq(struct ahd_softc *ahd);

static __inline void
ahd_freeze_scb(struct scb *scb)
{
        if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
                scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
                scb->platform_data->dev->qfrozen++;
        }
}

void    ahd_platform_set_tags(struct ahd_softc *ahd,
                              struct ahd_devinfo *devinfo, ahd_queue_alg);
int     ahd_platform_abort_scbs(struct ahd_softc *ahd, int target,
                                char channel, int lun, u_int tag,
                                role_t role, uint32_t status);
irqreturn_t
        ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs);
void    ahd_platform_flushwork(struct ahd_softc *ahd);
int     ahd_softc_comp(struct ahd_softc *, struct ahd_softc *);
void    ahd_done(struct ahd_softc*, struct scb*);
void    ahd_send_async(struct ahd_softc *, char channel,
                       u_int target, u_int lun, ac_code, void *);
void    ahd_print_path(struct ahd_softc *, struct scb *);
void    ahd_platform_dump_card_state(struct ahd_softc *ahd);

#ifdef CONFIG_PCI
#define AHD_PCI_CONFIG 1
#else
#define AHD_PCI_CONFIG 0
#endif
#define bootverbose aic79xx_verbose
extern uint32_t aic79xx_verbose;

#endif /* _AIC79XX_LINUX_H_ */