 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
#ifndef	_SYS_IB_ADAPTERS_TAVOR_EVENT_H
#define	_SYS_IB_ADAPTERS_TAVOR_EVENT_H

 * Contains all of the prototypes, #defines, and structures necessary
 * for the Interrupt and Event Processing routines.
 * Specifically, it contains the various event types, event flags,
 * structures used for managing Tavor event queues, and prototypes for
 * many of the functions consumed by other parts of the Tavor driver.

#include <sys/types.h>
#include <sys/sunddi.h>
 * Tavor UAR Doorbell Write Macro
 *
 * If on a 32-bit system, we must hold a lock around the ddi_put64() to
 * ensure that the 64-bit write is an atomic operation.  This is a
 * requirement of the Tavor hardware, and it protects against the race
 * that arises when more than one kernel thread attempts its pair of
 * 32-bit accesses (which together make up the 64-bit doorbell write)
 * simultaneously.
 *
 * If we are on a 64-bit system, the ddi_put64() completes as a single
 * 64-bit instruction and the lock is not needed.
 *
 * This is done as a preprocessor #if to speed up execution at run-time,
 * since doorbell ringing is a "fast-path" operation.
#if (DATAMODEL_NATIVE == DATAMODEL_ILP32)
#define	TAVOR_UAR_DOORBELL(state, ts_uar, doorbell) {			\
	mutex_enter(&state->ts_uar_lock);				\
	ddi_put64(state->ts_reg_uarhdl, ts_uar, doorbell);		\
	mutex_exit(&state->ts_uar_lock);				\
}
#else
#define	TAVOR_UAR_DOORBELL(state, ts_uar, doorbell) {			\
	ddi_put64(state->ts_reg_uarhdl, ts_uar, doorbell);		\
}
#endif
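
/*
 * Illustrative sketch only (never compiled): one way the macro above
 * might be invoked.  The "uar_addr" and "doorbell_val" names below are
 * hypothetical placeholders; real callers compose the 64-bit doorbell
 * word from hardware-specific fields and point at the mapped UAR page.
 */
#if 0
	uint64_t	*uar_addr = NULL;	/* hypothetical UAR register */
	uint64_t	doorbell_val = 0;	/* hypothetical doorbell word */

	TAVOR_UAR_DOORBELL(state, uar_addr, doorbell_val);
#endif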
 * The following defines specify the default number of Event Queues (EQ) and
 * their default size.  By default the size of each EQ is set to 4K entries,
 * but this value is controllable through the "log_default_eq_sz"
 * configuration variable.  We also specify the number of EQs which the Tavor
 * driver currently uses (TAVOR_NUM_EQ_USED).  Note: this value should be
 * less than or equal to TAVOR_NUM_EQ.  Because there are only so many classes
 * of events today, it is unnecessary to allocate all 64 EQs only to leave
 * several of them unused.
#define	TAVOR_NUM_EQ_SHIFT		0x6
#define	TAVOR_NUM_EQ			(1 << TAVOR_NUM_EQ_SHIFT)
#define	TAVOR_NUM_EQ_USED		47
#define	TAVOR_DEFAULT_EQ_SZ_SHIFT	0xC
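
/*
 * Worked example (sketch only, never compiled): with the shifts above the
 * driver would create 1 << 0x6 == 64 EQ contexts, use 47 of them, and size
 * each EQ at 1 << 0xC == 4096 entries -- the "4K entries" default mentioned
 * above, unless "log_default_eq_sz" overrides it.
 */
#if 0
	uint32_t	num_eq = 1 << TAVOR_NUM_EQ_SHIFT;		/* 64 */
	uint32_t	eq_sz  = 1 << TAVOR_DEFAULT_EQ_SZ_SHIFT;	/* 4096 */
#endif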
 * The following macro determines whether the contents of EQ memory (EQEs)
 * need to be sync'd (with ddi_dma_sync()).  This decision is based on
 * whether the EQ memory is in DDR memory (no sync) or system memory (sync
 * required).  Note: it doesn't make much sense to put EQEs in DDR memory
 * (since they are primarily written by HW and read by the CPU), but the
 * driver does support that possibility.  It also supports the possibility
 * that, if an EQ in system memory is mapped DDI_DMA_CONSISTENT, it can be
 * configured not to be sync'd because of the "sync override" parameter in
 * the config profile.
#define	TAVOR_EQ_IS_SYNC_REQ(state, eqinfo)				\
	((((((state)->ts_cfg_profile->cp_streaming_consistent) &&	\
	((state)->ts_cfg_profile->cp_consistent_syncoverride))) ||	\
	((eqinfo).qa_location == TAVOR_QUEUE_LOCATION_INDDR))		\
	? 0 : 1)
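
/*
 * Sketch only (never compiled): how the predicate above might gate a
 * ddi_dma_sync() call.  The "eq", "dmahdl", "off", and "len" variables are
 * hypothetical placeholders; the real sync logic lives in the EQ
 * processing code.
 */
#if 0
	if (TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo)) {
		(void) ddi_dma_sync(dmahdl, off, len, DDI_DMA_SYNC_FORCPU);
	}
#endif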
 * The following defines specify the size of the individual Event Queue
 * Context (EQC) entries.

#define	TAVOR_EQC_SIZE_SHIFT		0x6
#define	TAVOR_EQC_SIZE			(1 << TAVOR_EQC_SIZE_SHIFT)
 * These are the defines for the Tavor event types.  They are specified by
 * the Tavor register specification.  Below them are the "event type masks",
 * in which each event type corresponds to one of the 64 bits in the mask.
#define	TAVOR_EVT_COMPLETION			0x00
#define	TAVOR_EVT_PATH_MIGRATED			0x01
#define	TAVOR_EVT_COMM_ESTABLISHED		0x02
#define	TAVOR_EVT_SEND_QUEUE_DRAINED		0x03
#define	TAVOR_EVT_CQ_ERRORS			0x04
#define	TAVOR_EVT_LOCAL_WQ_CAT_ERROR		0x05
#define	TAVOR_EVT_LOCAL_EEC_CAT_ERROR		0x06	/* unsupported: RD */
#define	TAVOR_EVT_PATH_MIGRATE_FAILED		0x07
#define	TAVOR_EVT_LOCAL_CAT_ERROR		0x08
#define	TAVOR_EVT_PORT_STATE_CHANGE		0x09
#define	TAVOR_EVT_COMMAND_INTF_COMP		0x0A
#define	TAVOR_EVT_WQE_PG_FAULT			0x0B
#define	TAVOR_EVT_UNSUPPORTED_PG_FAULT		0x0C
#define	TAVOR_EVT_ECC_DETECTION			0x0E
#define	TAVOR_EVT_EQ_OVERFLOW			0x0F
#define	TAVOR_EVT_INV_REQ_LOCAL_WQ_ERROR	0x10
#define	TAVOR_EVT_LOCAL_ACC_VIO_WQ_ERROR	0x11
#define	TAVOR_EVT_SRQ_CATASTROPHIC_ERROR	0x12
#define	TAVOR_EVT_SRQ_LAST_WQE_REACHED		0x13
#define	TAVOR_EVT_MSK_COMPLETION		\
	(1 << TAVOR_EVT_COMPLETION)
#define	TAVOR_EVT_MSK_PATH_MIGRATED		\
	(1 << TAVOR_EVT_PATH_MIGRATED)
#define	TAVOR_EVT_MSK_COMM_ESTABLISHED		\
	(1 << TAVOR_EVT_COMM_ESTABLISHED)
#define	TAVOR_EVT_MSK_SEND_QUEUE_DRAINED	\
	(1 << TAVOR_EVT_SEND_QUEUE_DRAINED)
#define	TAVOR_EVT_MSK_CQ_ERRORS			\
	(1 << TAVOR_EVT_CQ_ERRORS)
#define	TAVOR_EVT_MSK_LOCAL_WQ_CAT_ERROR	\
	(1 << TAVOR_EVT_LOCAL_WQ_CAT_ERROR)
#define	TAVOR_EVT_MSK_LOCAL_EEC_CAT_ERROR	\
	(1 << TAVOR_EVT_LOCAL_EEC_CAT_ERROR)	/* unsupported: RD */
#define	TAVOR_EVT_MSK_PATH_MIGRATE_FAILED	\
	(1 << TAVOR_EVT_PATH_MIGRATE_FAILED)
#define	TAVOR_EVT_MSK_LOCAL_CAT_ERROR		\
	(1 << TAVOR_EVT_LOCAL_CAT_ERROR)
#define	TAVOR_EVT_MSK_PORT_STATE_CHANGE		\
	(1 << TAVOR_EVT_PORT_STATE_CHANGE)
#define	TAVOR_EVT_MSK_COMMAND_INTF_COMP		\
	(1 << TAVOR_EVT_COMMAND_INTF_COMP)
#define	TAVOR_EVT_MSK_WQE_PG_FAULT		\
	(1 << TAVOR_EVT_WQE_PG_FAULT)
#define	TAVOR_EVT_MSK_UNSUPPORTED_PG_FAULT	\
	(1 << TAVOR_EVT_UNSUPPORTED_PG_FAULT)
#define	TAVOR_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR	\
	(1 << TAVOR_EVT_INV_REQ_LOCAL_WQ_ERROR)
#define	TAVOR_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR	\
	(1 << TAVOR_EVT_LOCAL_ACC_VIO_WQ_ERROR)
#define	TAVOR_EVT_MSK_SRQ_CATASTROPHIC_ERROR	\
	(1 << TAVOR_EVT_SRQ_CATASTROPHIC_ERROR)
#define	TAVOR_EVT_MSK_SRQ_LAST_WQE_REACHED	\
	(1 << TAVOR_EVT_SRQ_LAST_WQE_REACHED)
#define	TAVOR_EVT_MSK_ECC_DETECTION		\
	(1 << TAVOR_EVT_ECC_DETECTION)
#define	TAVOR_EVT_NO_MASK			0
#define	TAVOR_EVT_CATCHALL_MASK			0x1840
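
/*
 * Sketch only (never compiled): the per-event masks above are intended to
 * be OR'd together when a single EQ is mapped to more than one event
 * class, e.g. an EQ that handles both port state changes and
 * communication-established events.
 */
#if 0
	uint64_t	evt_mask = TAVOR_EVT_MSK_PORT_STATE_CHANGE |
	    TAVOR_EVT_MSK_COMM_ESTABLISHED;
#endif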
 * The last defines are used by tavor_eqe_sync() to indicate whether or not
 * to force a DMA sync.  The case for forcing a DMA sync on an EQE comes
 * from the possibility that we could receive an interrupt and read the ECR,
 * and have both of those operations complete successfully _before_ the
 * hardware has finished its DMA to the event queue.
#define	TAVOR_EQ_SYNC_NORMAL			0x0
#define	TAVOR_EQ_SYNC_FORCE			0x1
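
/*
 * Sketch only (never compiled): the flags above select between the normal
 * sync policy and an unconditional sync.  The "eqe_not_yet_valid" test is
 * a hypothetical placeholder for whatever staleness check the EQE
 * processing code performs before deciding to force the sync.
 */
#if 0
	uint_t	force = eqe_not_yet_valid ? TAVOR_EQ_SYNC_FORCE :
	    TAVOR_EQ_SYNC_NORMAL;
#endif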
 * Catastrophic error values.  In case of a catastrophic error, the
 * following error codes are reported in a special buffer space.  The buffer
 * location is returned by the QUERY_FW command.  We check that buffer
 * against these error values to determine what kind of error occurred.
#define	TAVOR_CATASTROPHIC_INTERNAL_ERROR		0x0
#define	TAVOR_CATASTROPHIC_UPLINK_BUS_ERROR		0x3
#define	TAVOR_CATASTROPHIC_DDR_DATA_ERROR		0x4
#define	TAVOR_CATASTROPHIC_INTERNAL_PARITY_ERROR	0x5
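
/*
 * Sketch only (never compiled): decoding a value read from the
 * catastrophic error buffer.  The "errval" variable is a hypothetical
 * placeholder for the word read from the buffer returned by QUERY_FW.
 */
#if 0
	switch (errval) {
	case TAVOR_CATASTROPHIC_INTERNAL_ERROR:
	case TAVOR_CATASTROPHIC_UPLINK_BUS_ERROR:
	case TAVOR_CATASTROPHIC_DDR_DATA_ERROR:
	case TAVOR_CATASTROPHIC_INTERNAL_PARITY_ERROR:
		/* report the specific error type */
		break;
	default:
		/* unknown error code */
		break;
	}
#endif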
 * This define is the 'enable' flag used when programming the MSI number
 * into an event queue.  It is OR'd with the MSI number, and the result is
 * written into the EQ context.

#define	TAVOR_EQ_MSI_ENABLE_FLAG		0x80
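
/*
 * Sketch only (never compiled): combining the enable flag with an MSI
 * number before it is programmed into the EQ context.  The "msi_num"
 * variable is a hypothetical placeholder.
 */
#if 0
	uint_t	intr = TAVOR_EQ_MSI_ENABLE_FLAG | msi_num;
#endif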
 * The tavor_sw_eq_s structure is also referred to using the "tavor_eqhdl_t"
 * typedef (see tavor_typedef.h).  It encodes all the information necessary
 * to track the various resources needed to allocate, initialize, poll, and
 * (later) free an event queue (EQ).
 *
 * Specifically, it has a consumer index and a lock to ensure single-threaded
 * access to it.  It has pointers to the various resources allocated for the
 * event queue, i.e. an EQC resource and the memory for the event queue
 * itself.  It has flags to indicate whether the EQ requires ddi_dma_sync()
 * ("eq_sync") and to indicate which type of event class(es) the EQ has been
 * mapped to (eq_evttypemask).
 *
 * It also has a pointer to the associated MR handle (for the mapped queue
 * memory) and a function pointer that points to the handler that should
 * be called when the corresponding EQ has fired.  Note: the "eq_func"
 * handler takes a Tavor softstate pointer, a pointer to the EQ handle, and a
 * pointer to a generic tavor_hw_eqe_t structure.  It is up to the "eq_func"
 * handler function to determine what specific type of event is being passed.
 *
 * Lastly, we have the always-necessary backpointer to the resource for the
 * EQ handle structure itself.
struct tavor_sw_eq_s {
	uint32_t			eq_consindx;
	tavor_hw_eqe_t			*eq_buf;
	tavor_mrhdl_t			eq_mrhdl;
	uint_t				eq_evttypemask;
	tavor_rsrc_t			*eq_eqcrsrcp;
	tavor_rsrc_t			*eq_rsrcp;
	int (*eq_func)(tavor_state_t *state, tavor_eqhdl_t eq,
	    tavor_hw_eqe_t *eqe);
	struct tavor_qalloc_info_s	eq_eqinfo;
};
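
/*
 * Sketch only (never compiled): the general shape of an "eq_func" handler
 * and how it might be attached to an EQ handle.  "my_eq_handler" is a
 * hypothetical name; the real per-class handlers live in the EQ processing
 * code, and the return value follows the usual DDI_SUCCESS/DDI_FAILURE
 * convention assumed here.
 */
#if 0
static int
my_eq_handler(tavor_state_t *state, tavor_eqhdl_t eq, tavor_hw_eqe_t *eqe)
{
	/* decode the event type from the EQE and dispatch accordingly */
	return (DDI_SUCCESS);
}

	/* later, when the EQ is set up: */
	eq->eq_func = my_eq_handler;
#endif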
int tavor_eq_init_all(tavor_state_t *state);
int tavor_eq_fini_all(tavor_state_t *state);
void tavor_eq_arm_all(tavor_state_t *state);
uint_t tavor_isr(caddr_t arg1, caddr_t arg2);
void tavor_eq_doorbell(tavor_state_t *state, uint32_t eq_cmd, uint32_t eqn,
    uint32_t eq_param);
void tavor_eq_overflow_handler(tavor_state_t *state, tavor_eqhdl_t eq,
    tavor_hw_eqe_t *eqe);
#endif	/* _SYS_IB_ADAPTERS_TAVOR_EVENT_H */