/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #ifndef __IWCH_PROVIDER_H__
33 #define __IWCH_PROVIDER_H__
35 #include <linux/list.h>
36 #include <linux/spinlock.h>
37 #include <rdma/ib_verbs.h>
38 #include <asm/types.h>
/* Recover the driver's iwch_pd wrapper from its embedded ib_pd. */
static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct iwch_pd, ibpd);
}
55 struct tpt_attributes
{
60 enum tpt_mem_perm perms
;
61 u32 remote_invaliate_disable
:1;
79 struct tpt_attributes attr
;
/* Opaque handle type for a memory window. */
typedef struct iwch_mw iwch_mw_handle;
/* Recover the driver's iwch_mr wrapper from its embedded ib_mr. */
static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct iwch_mr, ibmr);
}
95 struct tpt_attributes attr
;
/* Recover the driver's iwch_mw wrapper from its embedded ib_mw. */
static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct iwch_mw, ibmw);
}
105 struct iwch_dev
*rhp
;
108 spinlock_t comp_handler_lock
;
110 wait_queue_head_t wait
;
111 u32 __user
*user_rptr_addr
;
/* Recover the driver's iwch_cq wrapper from its embedded ib_cq. */
static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct iwch_cq, ibcq);
}
123 struct iwch_mpa_attributes
{
125 u8 recv_marker_enabled
;
126 u8 xmit_marker_enabled
; /* iWARP: enable inbound Read Resp. */
128 u8 version
; /* 0 or 1 */
131 struct iwch_qp_attributes
{
137 u32 sq_max_sges_rdma_write
;
141 u8 enable_rdma_write
; /* enable inbound Read Resp. */
143 u8 enable_mmid0_fastreg
; /* Enable STAG0 + Fast-register */
145 * Next QP state. If specify the current state, only the
146 * QP attributes will be modified.
152 char terminate_buffer
[52];
153 u32 terminate_msg_len
;
154 u8 is_terminate_local
;
155 struct iwch_mpa_attributes mpa_attr
; /* IN-OUT */
156 struct iwch_ep
*llp_stream_handle
;
157 char *stream_msg_buf
; /* Last stream msg. before Idle -> RTS */
158 u32 stream_msg_buf_len
; /* Only on Idle -> RTS */
163 struct iwch_dev
*rhp
;
165 struct iwch_qp_attributes attr
;
169 wait_queue_head_t wait
;
170 enum IWCH_QP_FLAGS flags
;
171 struct timer_list timer
;
/*
 * Nonzero iff QP_QUIESCED is set in the QP's flags.  Note the return
 * value is the raw masked bit, not a normalized 0/1 boolean.
 */
static inline int qp_quiesced(struct iwch_qp *qhp)
{
	return qhp->flags & QP_QUIESCED;
}
/* Recover the driver's iwch_qp wrapper from its embedded ib_qp. */
static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct iwch_qp, ibqp);
}
/* QP reference counting hooks -- presumably take/drop one reference on
 * the underlying iwch_qp; implemented outside this header. */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
187 struct iwch_ucontext
{
188 struct ib_ucontext ibucontext
;
189 struct cxio_ucontext uctx
;
191 spinlock_t mmap_lock
;
192 struct list_head mmaps
;
/* Recover the driver's iwch_ucontext wrapper from its embedded ib_ucontext. */
static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct iwch_ucontext, ibucontext);
}
200 struct iwch_mm_entry
{
201 struct list_head entry
;
/*
 * Look up and unlink the mmap entry matching (key, len) on the
 * ucontext's mmaps list.  Returns the unlinked entry (ownership passes
 * to the caller) or NULL when no entry matches.  The list walk and the
 * unlink are protected by ucontext->mmap_lock.
 *
 * NOTE(review): the trailing "return mm;" / "return NULL;" and closing
 * braces were reconstructed -- the extraction dropped those lines;
 * confirm against the original file.
 */
static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct iwch_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	/* _safe variant: the matching node is deleted inside the loop. */
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct iwch_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
			     key, (unsigned long long) mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}
/*
 * Append an mmap entry to the ucontext's mmaps list under
 * ucontext->mmap_lock, so a later lookup by (key, len) can find it.
 * The entry remains owned by the list until removed.
 */
static inline void insert_mmap(struct iwch_ucontext *ucontext,
			       struct iwch_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
	     mm->key, (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
/*
 * Bit flags selecting which QP attributes a modify operation applies;
 * presumably consumed by iwch_modify_qp() (declared later in this file).
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* Union of every flag a plain modify may set. */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
260 int iwch_modify_qp(struct iwch_dev
*rhp
,
262 enum iwch_qp_attr_mask mask
,
263 struct iwch_qp_attributes
*attrs
,
270 IWCH_QP_STATE_TERMINATE
,
271 IWCH_QP_STATE_CLOSING
,
/*
 * Map a generic ib_qp_state onto the driver's IWCH_QP_STATE_* values.
 *
 * NOTE(review): only the return statements survived extraction; the
 * switch statement and its case labels below are reconstructed and
 * MUST be verified against the original source.
 */
static inline int iwch_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return IWCH_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return IWCH_QP_STATE_RTS;
	case IB_QPS_SQD:
		return IWCH_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return IWCH_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return IWCH_QP_STATE_ERROR;
	default:
		return -1;
	}
}
/*
 * Translate generic IB_ACCESS_* permission bits into the adapter's
 * TPT permission bits for a full memory registration.
 *
 * NOTE(review): the visible text ends with '|', so one trailing term
 * was dropped by extraction; TPT_LOCAL_READ is the reconstruction --
 * confirm against the original (local read is unconditional there).
 */
static inline u32 iwch_ib_to_tpt_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
	       (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
	       TPT_LOCAL_READ;
}
303 static inline u32
iwch_ib_to_tpt_bind_access(int acc
)
305 return (acc
& IB_ACCESS_REMOTE_WRITE
? TPT_REMOTE_WRITE
: 0) |
306 (acc
& IB_ACCESS_REMOTE_READ
? TPT_REMOTE_READ
: 0);
/* Validity states for a memory id (STag). */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
/* Flags controlling what a QP query returns / does to the HW context. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
/* Count of receive queue entries currently posted on the QP. */
u16 iwch_rqes_posted(struct iwch_qp *qhp);
/* Post SQ/RQ work requests; on failure *bad_wr is presumably set to the
 * first failing WR, per the usual ib_verbs convention -- verify. */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
333 int iwch_bind_mw(struct ib_qp
*qp
,
335 struct ib_mw_bind
*mw_bind
);
/* Poll up to num_entries completions from the CQ into wc[]. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE message for the QP in response to rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Post a zero-byte read on the endpoint's QP. */
int iwch_post_zb_read(struct iwch_ep *ep);
/* Register/unregister this device with the RDMA core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
/* Stop the QP's timer (declared "struct timer_list timer" above). */
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Register the memory region mhp with the adapter under PD php;
 * shift is presumably the page-size shift of the mapping -- verify. */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift);
344 int iwch_reregister_mem(struct iwch_dev
*rhp
, struct iwch_pd
*php
,
/* Physical buffer list (PBL) management for a memory region:
 * allocate npages worth of PBL space, free it, or write npages
 * big-endian page addresses starting at the given offset. */
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
void iwch_free_pbl(struct iwch_mr *mhp);
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
351 int build_phys_page_list(struct ib_phys_buf
*buffer_list
,
/* Human-readable node description string (presumably exported as the
 * ib_device node_desc attribute). */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"