/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

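/* rxe_av.c: address vector checks and conversion between rdma_ah_attr
 * and the rxe_av format carried in each packet
 */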
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av,
			 struct rdma_ah_attr *attr,
			 struct ib_gid_attr *sgid_attr,
			 union ib_gid *sgid);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

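/* rxe_cq.c: completion queue checks, setup, resize and completion posting */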
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector, struct ib_udata *udata);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_ucontext *context,
		     struct ib_udata *udata);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

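/* rxe_mcast.c: multicast group lookup and per-QP membership */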
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

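/* rxe_mmap.c: mapping of kernel queue memory into user space */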
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

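/* rxe_mr.c: memory region registration, address translation and data copy */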
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem);

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

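/* rxe_net.c: interface between the rxe transport and the network stack */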
int rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
	     struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

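/* rxe_qp.c: queue pair create/modify/query/destroy */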
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

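/* inline accessors for frequently read QP fields */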
static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return RXE_PORT_MAX_MTU;
}

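/* a receive WQE is the fixed header plus one ib_sge per scatter entry */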
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

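/* responder resources for inbound RDMA read/atomic requests; res_head
 * indexes a circular buffer of max_dest_rd_atomic entries
 */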
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

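/* rxe_srq.c: shared receive queue checks and setup */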
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata);

void rxe_release(struct kref *kref);

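/* work functions for the completer, requester and responder tasks;
 * arg is the struct rxe_qp being processed
 */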
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

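/* ICRC computation over the packet headers (rxe_icrc.c) */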
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

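/* transmit one fully built packet: loop it back locally when
 * RXE_LOOPBACK_MASK is set, otherwise hand it to the network stack;
 * the skb is dropped if the QP is not in the ready state
 */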
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		err = rxe_loopback(skb);
	} else {
		err = rxe_send(rxe, pkt, skb);
	}

	if (err) {
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}

#endif /* RXE_LOC_H */