/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
34 #include <sys/types.h>
35 #include <sys/dcopy.h>
36 #include <sys/dcopy_device.h>
/* ioctl numbers: 'T' in the high byte, command index in the low byte */
#define	IOATIOC			('T'<< 8)

/*
 * NOTE(review): the "typedef enum {" wrapper and the typedef name were
 * lost in extraction and have been reconstructed -- verify the name
 * against the ioctl handler before relying on it.
 */
typedef enum {
	IOAT_IOCTL_WRITE_REG	= (IOATIOC | 0x0),
	IOAT_IOCTL_READ_REG	= (IOATIOC | 0x1),
	IOAT_IOCTL_TEST		= (IOATIOC | 0x2)
} ioat_ioctl_enum_t;
47 typedef struct ioat_ioctl_reg_s
{
52 typedef ioat_ioctl_reg_t ioat_ioctl_wrreg_t
;
53 typedef ioat_ioctl_reg_t ioat_ioctl_rdreg_t
;
/* *** Driver Private Below *** */

/* IOAT_DMACAPABILITY flags */
#define	IOAT_DMACAP_PAGEBREAK	0x1
#define	IOAT_DMACAP_CRC		0x2
#define	IOAT_DMACAP_MARKERSKIP	0x4
#define	IOAT_DMACAP_XOR		0x8
#define	IOAT_DMACAP_DCA		0x10

/* IOAT_INTRCTL bits */
#define	IOAT_INTRCTL_MASTER_EN	0x1
#define	IOAT_INTRCTL_INTR_STAT	0x2
/* device-wide register offsets (from the base of the register window) */
#define	IOAT_CHANCNT		0x0	/* 8-bit */
#define	IOAT_XFERCAP		0x1	/* 8-bit */
#define	IOAT_GENCTRL		0x2	/* 8-bit */
#define	IOAT_INTRCTL		0x3	/* 8-bit */
#define	IOAT_ATTNSTATUS		0x4	/* 32-bit */
#define	IOAT_CBVER		0x8	/* 8-bit */
#define	IOAT_PERPORT_OFF	0xA	/* 16-bit */
#define	IOAT_INTRDELAY		0xC	/* 16-bit */
#define	IOAT_CSSTATUS		0xE	/* 16-bit */
#define	IOAT_DMACAPABILITY	0x10	/* 32-bit */

/* per-channel register banks start at this offset from the base */
#define	IOAT_CHANNELREG_OFFSET	0x80
/* Channel Registers (offsets within a channel's register bank) */
#define	IOAT_CHAN_CTL		0x0	/* 16-bit */
#define	IOAT_CHAN_COMP		0x2	/* 16-bit */
#define	IOAT_CHAN_CMPL_LO	0x18	/* 32-bit */
#define	IOAT_CHAN_CMPL_HI	0x1C	/* 32-bit */
#define	IOAT_CHAN_ERR		0x28	/* 32-bit */
#define	IOAT_CHAN_ERRMASK	0x2C	/* 32-bit */
#define	IOAT_CHAN_DCACTRL	0x30	/* 32-bit */

/* CB v1-only channel register offsets */
#define	IOAT_V1_CHAN_STS_LO	0x4	/* 32-bit */
#define	IOAT_V1_CHAN_STS_HI	0x8	/* 32-bit */
#define	IOAT_V1_CHAN_ADDR_LO	0x0C	/* 32-bit */
#define	IOAT_V1_CHAN_ADDR_HI	0x10	/* 32-bit */
#define	IOAT_V1_CHAN_CMD	0x14	/* 8-bit */

/* CB v2-only channel register offsets */
#define	IOAT_V2_CHAN_CMD	0x4	/* 8-bit */
#define	IOAT_V2_CHAN_CNT	0x6	/* 16-bit */
#define	IOAT_V2_CHAN_STS_LO	0x8	/* 32-bit */
#define	IOAT_V2_CHAN_STS_HI	0xC	/* 32-bit */
#define	IOAT_V2_CHAN_ADDR_LO	0x10	/* 32-bit */
#define	IOAT_V2_CHAN_ADDR_HI	0x14	/* 32-bit */
/* masks applied to the 64-bit channel status (completion) word */
#define	IOAT_CHAN_STS_ADDR_MASK		0xFFFFFFFFFFFFFFC0
#define	IOAT_CHAN_STS_XFER_MASK		0x3F
#define	IOAT_CHAN_STS_FAIL_MASK		0x6

/*
 * Ring index of the last completed descriptor: mask off the low status
 * bits of the completion word the hardware wrote into *ic_cmpl, subtract
 * the ring's base physical address, and divide by the 64-byte descriptor
 * size (>> 6).
 *
 * NOTE(review): this macro also reads a variable named "ring" that must
 * already be in scope at the call site (it is not a parameter) -- easy
 * to misuse; confirm every caller has a suitable "ring" local.
 */
#define	IOAT_CMPL_INDEX(channel)	\
	(((*(channel)->ic_cmpl & IOAT_CHAN_STS_ADDR_MASK) - \
	ring->cr_phys_desc) >> 6)

/* non-zero when the completion word reports a failed transfer */
#define	IOAT_CMPL_FAILED(channel)	\
	(*(channel)->ic_cmpl & IOAT_CHAN_STS_FAIL_MASK)
/*
 * Generic hardware descriptor; dd_next_desc chains to the next
 * descriptor (the ">> 6" in IOAT_CMPL_INDEX implies 64-byte descriptors).
 *
 * NOTE(review): all members other than dd_next_desc were lost in
 * extraction; the closing typedef was reconstructed (the name is used by
 * ioat_channel_ring_s below). Restore the full layout from the driver
 * source before use -- as written this struct is NOT 64 bytes.
 */
typedef struct ioat_chan_desc_s {
	uint64_t	dd_next_desc;
} ioat_chan_desc_t;
/* dca dd_ctrl bits */
#define	IOAT_DESC_CTRL_OP_CNTX		((uint32_t)0xFF << 24)
#define	IOAT_DESC_CTRL_CNTX_CHNG	0x1
/*
 * DCA (direct cache access) context-change descriptor.
 *
 * NOTE(review): all members other than dd_next_desc were lost in
 * extraction -- restore the full layout from the driver source before
 * use; as written this struct is NOT descriptor-sized.
 */
typedef struct ioat_chan_dca_desc_s {
	uint64_t	dd_next_desc;
} ioat_chan_dca_desc_t;
/* dma dd_ctrl bits */
#define	IOAT_DESC_CTRL_OP_DMA		(0x0 << 24)
#define	IOAT_DESC_DMACTRL_NULL		0x20
#define	IOAT_DESC_CTRL_FENCE		0x10
#define	IOAT_DESC_CTRL_CMPL		0x8
#define	IOAT_DESC_CTRL_NODSTSNP		0x4
#define	IOAT_DESC_CTRL_NOSRCSNP		0x2
#define	IOAT_DESC_CTRL_INTR		0x1
/*
 * DMA copy descriptor: source/destination physical addresses plus the
 * chain link; the *_next_* address pair is marked as used only by CB v2
 * hardware.
 *
 * NOTE(review): the leading members (size/control words) were lost in
 * extraction -- restore the full layout from the driver source before
 * use; as written this struct is NOT descriptor-sized.
 */
typedef struct ioat_chan_dma_desc_s {
	uint64_t	dd_src_paddr;
	uint64_t	dd_dest_paddr;
	uint64_t	dd_next_desc;
	uint64_t	dd_next_src_paddr;	/* v2 only */
	uint64_t	dd_next_dest_paddr;	/* v2 only */
} ioat_chan_dma_desc_t;
/* ioat private data per command */
typedef struct ioat_cmd_private_s {
	uint64_t	ip_generation;
	/*
	 * NOTE(review): additional members were lost in extraction --
	 * restore from the driver source before use.
	 */
} ioat_cmd_private_t;
176 /* descriptor ring state */
177 typedef struct ioat_channel_ring_s
{
178 /* protects cr_cmpl_gen & cr_cmpl_last */
179 kmutex_t cr_cmpl_mutex
;
181 /* desc ring generation for the last completion we saw */
182 uint64_t cr_cmpl_gen
;
184 /* last descriptor index we saw complete */
185 uint64_t cr_cmpl_last
;
187 /* protects cr_desc_* */
188 kmutex_t cr_desc_mutex
;
191 * last descriptor posted. used to update its next pointer when we
192 * add a new desc. Also used to tack the completion (See comment for
195 uint64_t cr_desc_prev
;
197 /* where to put the next descriptor */
198 uint64_t cr_desc_next
;
200 /* what the current desc ring generation is */
201 uint64_t cr_desc_gen
;
204 * used during cmd_post to track the last desc posted. cr_desc_next
205 * and cr_desc_gen will be pointing to the next free desc after
206 * writing the descriptor to the ring. But we want to track the
207 * completion for the last descriptor posted.
209 uint64_t cr_desc_gen_prev
;
211 /* the last desc in the ring (for wrap) */
212 uint64_t cr_desc_last
;
214 /* pointer to the head of the ring */
215 ioat_chan_desc_t
*cr_desc
;
217 /* physical address of the head of the ring */
218 uint64_t cr_phys_desc
;
220 /* back pointer to the channel state */
221 struct ioat_channel_s
*cr_chan
;
223 /* for CB v2, number of desc posted (written to IOAT_V2_CHAN_CNT) */
225 } ioat_channel_ring_t
;
/* track channel state so we can handle a failure */
typedef enum {
	/*
	 * NOTE(review): the enum wrapper and the OK value were lost in
	 * extraction and have been reconstructed -- verify against the
	 * driver source.
	 */
	IOAT_CHANNEL_OK = 0,
	IOAT_CHANNEL_IN_FAILURE = 1
} ic_channel_state_t;
233 typedef struct ioat_channel_s
*ioat_channel_t
;
234 struct ioat_channel_s
{
235 /* channel's ring state */
236 ioat_channel_ring_t
*ic_ring
;
238 /* IOAT_CBv1 || IOAT_CBv2 */
239 ioat_version_t ic_ver
;
242 * state to determine if it's OK to post the the channel and if all
243 * future polls should return failure.
245 ic_channel_state_t ic_channel_state
;
247 /* channel command cache (*_cmd_alloc, *_cmd_free, etc) */
248 kmem_cache_t
*ic_cmd_cache
;
250 /* dcopy state for dcopy_device_channel_notify() call */
251 dcopy_handle_t ic_dcopy_handle
;
253 /* location in memory where completions are DMA'ed into */
254 volatile uint64_t *ic_cmpl
;
256 /* channel specific registers */
259 /* if this channel is using DCA */
260 boolean_t ic_dca_active
;
262 /* DCA ID the channel is currently pointing to */
263 uint32_t ic_dca_current
;
265 /* devices channel number */
268 /* number of descriptors in ring */
269 uint_t ic_chan_desc_cnt
;
271 /* descriptor ring alloc state */
272 ddi_dma_handle_t ic_desc_dma_handle
;
273 size_t ic_desc_alloc_size
;
274 ddi_acc_handle_t ic_desc_handle
;
275 ddi_dma_cookie_t ic_desc_cookies
;
277 /* completion buffer alloc state */
278 ddi_dma_handle_t ic_cmpl_dma_handle
;
279 size_t ic_cmpl_alloc_size
;
280 ddi_acc_handle_t ic_cmpl_handle
;
281 ddi_dma_cookie_t ic_cmpl_cookie
;
282 uint64_t ic_phys_cmpl
;
284 /* if inuse, we need to re-init the channel during resume */
287 /* backpointer to driver state */
288 struct ioat_state_s
*ic_state
;
/* opaque handle for the channel-number resource allocator (ioat_rs_*) */
typedef struct ioat_rs_s *ioat_rs_hdl_t;
294 typedef struct ioat_state_s
{
300 /* register handle and pointer to registers */
301 ddi_acc_handle_t is_reg_handle
;
304 /* IOAT_CBv1 || IOAT_CBv2 */
305 ioat_version_t is_ver
;
308 ioat_channel_t is_channel
;
310 ioat_rs_hdl_t is_channel_rs
;
312 ddi_iblock_cookie_t is_iblock_cookie
;
316 uint_t is_num_channels
;
321 uint_t is_capabilities
;
323 /* dcopy_device_register()/dcopy_device_unregister() state */
324 dcopy_device_handle_t is_device_handle
;
325 dcopy_device_info_t is_deviceinfo
;
329 int ioat_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*cred
,
332 void ioat_rs_init(ioat_state_t
*state
, uint_t min_val
, uint_t max_val
,
333 ioat_rs_hdl_t
*handle
);
334 void ioat_rs_fini(ioat_rs_hdl_t
*handle
);
335 int ioat_rs_alloc(ioat_rs_hdl_t handle
, uint_t
*rs
);
336 void ioat_rs_free(ioat_rs_hdl_t handle
, uint_t rs
);
338 int ioat_channel_init(ioat_state_t
*state
);
339 void ioat_channel_fini(ioat_state_t
*state
);
340 void ioat_channel_suspend(ioat_state_t
*state
);
341 int ioat_channel_resume(ioat_state_t
*state
);
342 void ioat_channel_quiesce(ioat_state_t
*);
344 int ioat_channel_alloc(void *device_private
, dcopy_handle_t handle
, int flags
,
345 uint_t size
, dcopy_query_channel_t
*info
, void *channel_private
);
346 void ioat_channel_free(void *channel_private
);
347 void ioat_channel_intr(ioat_channel_t channel
);
348 int ioat_cmd_alloc(void *channel
, int flags
, dcopy_cmd_t
*cmd
);
349 void ioat_cmd_free(void *channel
, dcopy_cmd_t
*cmd
);
350 int ioat_cmd_post(void *channel
, dcopy_cmd_t cmd
);
351 int ioat_cmd_poll(void *channel
, dcopy_cmd_t cmd
);
352 void ioat_unregister_complete(void *device_private
, int status
);
361 #endif /* _SYS_IOAT_H */