/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 */
10 #ifndef _ASM_IA64_SN_BTE_H
11 #define _ASM_IA64_SN_BTE_H
13 #include <linux/timer.h>
14 #include <linux/spinlock.h>
15 #include <linux/cache.h>
16 #include <asm/sn/types.h>
/* Define BTE_DEBUG for terse debug output, and additionally
 * BTE_DEBUG_VERBOSE for per-transfer detail.  Both default to off;
 * the macros then compile away to nothing. */
/* #define BTE_DEBUG */
/* #define BTE_DEBUG_VERBOSE */

#ifdef BTE_DEBUG
#  define BTE_PRINTK(x)	printk x	/* Terse */
#  ifdef BTE_DEBUG_VERBOSE
#    define BTE_PRINTKV(x) printk x	/* Verbose */
#  else
#    define BTE_PRINTKV(x)
#  endif /* BTE_DEBUG_VERBOSE */
#else	/* BTE_DEBUG */
#  define BTE_PRINTK(x)
#  define BTE_PRINTKV(x)
#endif	/* BTE_DEBUG */
/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
/* Max bytes per transfer: 2^16 cache lines. */
#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)

/* Each node has two BTE interfaces (BTE0 and BTE1). */
#define BTES_PER_NODE 2

/* Define hardware modes */
#define BTE_NOTIFY (IBCT_NOTIFY)
#define BTE_NORMAL BTE_NOTIFY
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE (0x4000)
/* Use the BTE on the node with the destination memory */
#define BTE_USE_DEST (BTE_WACQUIRE << 1)
/* Use any available BTE interface on any node for the transfer */
#define BTE_USE_ANY (BTE_USE_DEST << 1)
/* macro to force the IBCT0 value valid */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))

/* Transfer is in flight or completed with error. */
#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
/*
 * Some macros to simplify reading.
 * Start with macros to locate the BTE control registers.
 * bte_base_addr points at the length/status register; the source,
 * destination, control and notify registers sit at fixed byte offsets
 * (BTEOFF_*) from it, addressed here in u64 (8-byte) units.
 */
#define BTE_LNSTAT_LOAD(_bte)						\
			HUB_L(_bte->bte_base_addr)
#define BTE_LNSTAT_STORE(_bte, _x)					\
			HUB_S(_bte->bte_base_addr, (_x))
#define BTE_SRC_STORE(_bte, _x)						\
			HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x))
#define BTE_DEST_STORE(_bte, _x)					\
			HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x))
#define BTE_CTRL_STORE(_bte, _x)					\
			HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x))
#define BTE_NOTIF_STORE(_bte, _x)					\
			HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x))
/* Possible results from bte_copy and bte_unaligned_copy */
/* The following error codes map into the BTE hardware codes
 * IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
 * an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
 * to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
 * codes to give the following error codes.
 */
#define BTEFAIL_OFFSET 1
/* Result of a BTE transfer: 0 is success; failures are the hardware
 * IIO_ICRB_ECODE_* value plus BTEFAIL_OFFSET (see comment above),
 * except BTEFAIL_NOTAVAIL which is a purely software status. */
typedef enum {
	BTE_SUCCESS,		/* 0 is success */
	BTEFAIL_DIR,		/* Directory error due to IIO access */
	BTEFAIL_POISON,		/* poison error on IO access (write to poison page) */
	BTEFAIL_WERR,		/* Write error (ie WINV to a Read only line) */
	BTEFAIL_ACCESS,		/* access error (protection violation) */
	BTEFAIL_PWERR,		/* Partial Write Error */
	BTEFAIL_PRERR,		/* Partial Read Error */
	BTEFAIL_TOUT,		/* CRB Time out */
	BTEFAIL_XTERR,		/* Incoming xtalk pkt had error bit */
	BTEFAIL_NOTAVAIL	/* BTE not available */
} bte_result_t;
102 * Structure defining a bte. An instance of this
103 * structure is created in the nodepda for each
104 * bte on that node (as defined by BTES_PER_NODE)
105 * This structure contains everything necessary
106 * to work with a BTE.
109 volatile u64 notify ____cacheline_aligned
;
110 u64
*bte_base_addr ____cacheline_aligned
;
112 cnodeid_t bte_cnode
; /* cnode */
113 int bte_error_count
; /* Number of errors encountered */
114 int bte_num
; /* 0 --> BTE0, 1 --> BTE1 */
115 int cleanup_active
; /* Interface is locked for cleanup */
116 volatile bte_result_t bh_error
; /* error while processing */
117 volatile u64
*most_rcnt_na
;
122 * Function prototypes (functions defined in bte.c, used elsewhere)
124 extern bte_result_t
bte_copy(u64
, u64
, u64
, u64
, void *);
125 extern bte_result_t
bte_unaligned_copy(u64
, u64
, u64
, u64
);
126 extern void bte_error_handler(unsigned long);
/* Zero-fill len bytes at dest: a bte_copy from source 0 with
 * BTE_ZERO_FILL or'd into the caller's mode. */
#define bte_zero(dest, len, mode, notification) \
	bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
132 * The following is the prefered way of calling bte_unaligned_copy
133 * If the copy is fully cache line aligned, then bte_copy is
134 * used instead. Since bte_copy is inlined, this saves a call
135 * stack. NOTE: bte_copy is called synchronously and does block
136 * until the transfer is complete. In order to get the asynch
137 * version of bte_copy, you must perform this check yourself.
139 #define BTE_UNALIGNED_COPY(src, dest, len, mode) \
140 (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
141 (dest & L1_CACHE_MASK)) ? \
142 bte_unaligned_copy(src, dest, len, mode) : \
143 bte_copy(src, dest, len, mode, NULL))
146 #endif /* _ASM_IA64_SN_BTE_H */