1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2015 - 2020 Intel Corporation */
/*
 * Opaque handle and scalar type aliases used throughout the irdma
 * user-space verbs interface.  These are macros rather than typedefs so
 * the same names resolve in both kernel and user builds.
 * (Fix: removed stray line numbers embedded by a bad extraction.)
 */
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *

/* Largest supported memory region: 0x2000_0000_0000 = 32 TiB */
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
/*
 * STag / memory-window access privilege flags.  Note that the REMOTE*
 * convenience values also include the matching LOCAL right, e.g.
 * REMOTEREAD (0x05) = REMOTEREAD_ONLY (0x04) | LOCALREAD (0x01).
 * (Fix: removed stray line numbers embedded by a bad extraction.)
 */
#define IRDMA_ACCESS_FLAGS_LOCALREAD		0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE		0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY	0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD		0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY	0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE		0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW		0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED		0x20
#define IRDMA_ACCESS_FLAGS_ALL			0x3f
/*
 * WQE/CQE operation type codes as reported by the hardware.  Values are
 * sparse and fixed by the device ABI -- do not renumber.
 * (Fix: removed stray line numbers embedded by a bad extraction.)
 */
#define IRDMA_OP_TYPE_RDMA_WRITE		0x00
#define IRDMA_OP_TYPE_RDMA_READ			0x01
#define IRDMA_OP_TYPE_SEND			0x03
#define IRDMA_OP_TYPE_SEND_INV			0x04
#define IRDMA_OP_TYPE_SEND_SOL			0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV		0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL		0x0d
#define IRDMA_OP_TYPE_BIND_MW			0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR		0x09
#define IRDMA_OP_TYPE_INV_STAG			0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG	0x0b
#define IRDMA_OP_TYPE_NOP			0x0c
/* Receive-side completion opcodes */
#define IRDMA_OP_TYPE_REC			0x3e
#define IRDMA_OP_TYPE_REC_IMM			0x3f

/* Major error code indicating a flushed completion */
#define IRDMA_FLUSH_MAJOR_ERR 1
/*
 * Device capability constants shared by the irdma driver and its
 * user-space provider: buffer sizes (in 64-bit quanta or bytes), ID
 * ranges, and queue depth limits.
 *
 * NOTE(review): this copy is garbled by extraction -- stray original
 * line numbers are embedded in the text, several enumerators referenced
 * later in this header are missing (e.g. IRDMA_CQE_SIZE used at
 * "__le64 buf[IRDMA_CQE_SIZE]" and IRDMA_WQE_SIZE used in
 * struct irdma_qp_quanta), and the closing "};" of the enum was lost.
 * Restore from the pristine header before building.
 */
49 enum irdma_device_caps_const
{
/* control-path buffer sizes */
51 IRDMA_CQP_WQE_SIZE
= 8,
53 IRDMA_EXTENDED_CQE_SIZE
= 8,
56 IRDMA_CQP_CTX_SIZE
= 8,
57 IRDMA_SHADOW_AREA_SIZE
= 8,
58 IRDMA_QUERY_FPM_BUF_SIZE
= 176,
59 IRDMA_COMMIT_FPM_BUF_SIZE
= 176,
60 IRDMA_GATHER_STATS_BUF_SIZE
= 1024,
/* QP/CEQ/CQ identifier ranges */
61 IRDMA_MIN_IW_QP_ID
= 0,
62 IRDMA_MAX_IW_QP_ID
= 262143,
64 IRDMA_MAX_CEQID
= 1023,
65 IRDMA_CEQ_MAX_COUNT
= IRDMA_MAX_CEQID
+ 1,
67 IRDMA_MAX_CQID
= 524287,
/* event/completion queue depth limits */
68 IRDMA_MIN_AEQ_ENTRIES
= 1,
69 IRDMA_MAX_AEQ_ENTRIES
= 524287,
70 IRDMA_MIN_CEQ_ENTRIES
= 1,
71 IRDMA_MAX_CEQ_ENTRIES
= 262143,
72 IRDMA_MIN_CQ_SIZE
= 1,
73 IRDMA_MAX_CQ_SIZE
= 1048575,
/* work-request and message limits */
75 IRDMA_MAX_WQ_FRAGMENT_COUNT
= 13,
76 IRDMA_MAX_SGE_RD
= 13,
77 IRDMA_MAX_OUTBOUND_MSG_SIZE
= 2147483647,
78 IRDMA_MAX_INBOUND_MSG_SIZE
= 2147483647,
79 IRDMA_MAX_PUSH_PAGE_COUNT
= 1024,
80 IRDMA_MAX_PE_ENA_VF_COUNT
= 32,
81 IRDMA_MAX_VF_FPM_ID
= 47,
82 IRDMA_MAX_SQ_PAYLOAD_SIZE
= 2145386496,
83 IRDMA_MAX_INLINE_DATA_SIZE
= 101,
84 IRDMA_MAX_WQ_ENTRIES
= 32768,
85 IRDMA_Q2_BUF_SIZE
= 256,
86 IRDMA_QP_CTX_SIZE
= 256,
87 IRDMA_MAX_PDS
= 262144,
88 IRDMA_MIN_WQ_SIZE_GEN2
= 8,
/*
 * Addressing mode used when binding a memory window/region: offsets may
 * be zero-based or full virtual addresses.
 * (Fix: removed stray embedded line numbers and restored the closing
 * brace lost in extraction; both enumerators were visible.)
 */
enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED   = 1,
};
/*
 * Flush completion opcodes mapped to verbs WC status on a flushed QP.
 * NOTE(review): extraction dropped most enumerators of this enum (only
 * two members survive here) and its closing "};".  Enumerator order
 * determines implicit values, so this fragment must not be compiled
 * as-is -- restore from the pristine header.
 */
96 enum irdma_flush_opcode
{
100 FLUSH_REM_ACCESS_ERR
,
107 FLUSH_REM_INV_REQ_ERR
,
/*
 * Completion status codes reported in irdma_cq_poll_info.comp_status
 * when a CQE is polled.  IRDMA_COMPL_STATUS_SUCCESS is 0; all other
 * values take successive implicit values.
 * (Fix: removed stray embedded line numbers and restored the closing
 * brace lost in extraction; the member list was fully visible and its
 * original line numbering contiguous.)
 */
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};
/*
 * CQ arm level passed to irdma_uk_cq_request_notification(): notify on
 * any completion event, or only on solicited completions.
 * (Fix: removed stray embedded line numbers and restored the closing
 * brace lost in extraction; both enumerators were visible.)
 */
enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT     = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};
/*
 * NOTE(review): orphaned enumerators -- the opening declaration of the
 * enum they belong to (QP capability flags) and any further members
 * were lost in extraction.  Values are bit flags, not sequential.
 */
144 IRDMA_WRITE_WITH_IMM
= 1,
145 IRDMA_SEND_WITH_IMM
= 2,
/*
 * Forward declarations; the full definitions appear later in this
 * header.  (Fix: removed stray line numbers embedded by a bad
 * extraction.)
 */
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
/*
 * NOTE(review): fragment of struct irdma_cqe -- the "struct irdma_cqe {"
 * opening and the closing "};" were lost in extraction.  The sole
 * member is the raw little-endian CQE buffer, IRDMA_CQE_SIZE 64-bit
 * words (IRDMA_CQE_SIZE itself is among the enumerators dropped from
 * enum irdma_device_caps_const above).
 */
162 __le64 buf
[IRDMA_CQE_SIZE
];
165 struct irdma_extended_cqe
{
166 __le64 buf
[IRDMA_EXTENDED_CQE_SIZE
];
/*
 * NOTE(review): the five work-request parameter structs below are all
 * fragments -- extraction dropped most of their members (counts, keys,
 * lengths, stags) and every closing "};".  Only the scatter/gather list
 * pointers and a few fields survive.  Restore from the pristine header.
 */
/* send WR parameters: SGE list (other fields lost in extraction) */
169 struct irdma_post_send
{
170 struct ib_sge
*sg_list
;
/* receive WR parameters: SGE list (other fields lost in extraction) */
177 struct irdma_post_rq_info
{
179 struct ib_sge
*sg_list
;
/* RDMA write WR: local SGE list plus remote address/key */
183 struct irdma_rdma_write
{
184 struct ib_sge
*lo_sg_list
;
186 struct ib_sge rem_addr
;
/* RDMA read WR: local SGE list plus remote address/key */
189 struct irdma_rdma_read
{
190 struct ib_sge
*lo_sg_list
;
192 struct ib_sge rem_addr
;
/* memory-window bind WR: addressing mode and type-1 window flag visible */
195 struct irdma_bind_window
{
199 enum irdma_addressing_type addressing_type
;
203 bool mem_window_type_1
:1;
206 struct irdma_inv_local_stag
{
207 irdma_stag target_stag
;
/*
 * NOTE(review): both structs below are fragments -- extraction dropped
 * many members (wr_id, flag bitfields, the op-union wrapper, qp_id,
 * byte length, UD fields, ...) and both closing "};".  The surviving
 * per-op members of irdma_post_sq_info presumably sit inside a union
 * selected by the WR opcode -- confirm against the pristine header.
 */
/* post-SQ work request descriptor (fragment) */
210 struct irdma_post_sq_info
{
218 bool imm_data_valid
:1;
/* per-opcode WR parameters (likely union members; wrapper lost) */
225 struct irdma_post_send send
;
226 struct irdma_rdma_write rdma_write
;
227 struct irdma_rdma_read rdma_read
;
228 struct irdma_bind_window bind_window
;
229 struct irdma_inv_local_stag inv_local_stag
;
/* CQ poll result reported by irdma_uk_cq_poll_cmpl() (fragment) */
233 struct irdma_cq_poll_info
{
235 irdma_qp_handle qp_handle
;
241 irdma_stag inv_stag
; /* or L_R_Key */
242 enum irdma_cmpl_status comp_status
;
249 bool stag_invalid_set
:1; /* or L_R_Key set */
251 bool solicited_event
:1;
253 bool ud_vlan_valid
:1;
254 bool ud_smac_valid
:1;
/*
 * User-space kernel-bypass (uk) work-request posting API.  All posters
 * take the QP and a filled-in irdma_post_sq_info; post_sq controls
 * whether the doorbell is rung immediately.
 *
 * NOTE(review): extraction dropped the continuation lines of several
 * prototypes here (irdma_uk_post_nop, irdma_uk_rdma_write,
 * irdma_uk_send, irdma_uk_stag_local_invalidate are left without their
 * trailing parameters/terminators -- compare irdma_nop near the end of
 * this header, which shows the full (qp, wr_id, signaled, post_sq)
 * shape).  Restore from the pristine header.
 */
258 int irdma_uk_inline_rdma_write(struct irdma_qp_uk
*qp
,
259 struct irdma_post_sq_info
*info
, bool post_sq
);
260 int irdma_uk_inline_send(struct irdma_qp_uk
*qp
,
261 struct irdma_post_sq_info
*info
, bool post_sq
);
/* truncated: trailing "bool post_sq);" lost */
262 int irdma_uk_post_nop(struct irdma_qp_uk
*qp
, u64 wr_id
, bool signaled
,
264 int irdma_uk_post_receive(struct irdma_qp_uk
*qp
,
265 struct irdma_post_rq_info
*info
);
266 void irdma_uk_qp_post_wr(struct irdma_qp_uk
*qp
);
267 int irdma_uk_rdma_read(struct irdma_qp_uk
*qp
, struct irdma_post_sq_info
*info
,
268 bool inv_stag
, bool post_sq
);
/* truncated: trailing parameter list lost */
269 int irdma_uk_rdma_write(struct irdma_qp_uk
*qp
, struct irdma_post_sq_info
*info
,
/* truncated: trailing parameter list lost */
271 int irdma_uk_send(struct irdma_qp_uk
*qp
, struct irdma_post_sq_info
*info
,
/* truncated: trailing parameter list lost */
273 int irdma_uk_stag_local_invalidate(struct irdma_qp_uk
*qp
,
274 struct irdma_post_sq_info
*info
,
/*
 * Hardware-generation-specific WQE manipulation callbacks: copying
 * inline data, sizing inline data in quanta, writing SGE fragments, and
 * building memory-window bind WQEs.
 * NOTE(review): fragment -- the continuation of iw_set_fragment's
 * parameter list and the struct's closing "};" were lost in extraction.
 */
277 struct irdma_wqe_uk_ops
{
312 void (*iw_copy_inline_data
)(u8
*dest
, struct ib_sge
*sge_list
,
279 u32 num_sges
, u8 polarity
);
280 u16 (*iw_inline_data_size_to_quanta
)(u32 data_size
);
/* truncated: trailing parameter(s) of iw_set_fragment lost */
281 void (*iw_set_fragment
)(__le64
*wqe
, u32 offset
, struct ib_sge
*sge
,
283 void (*iw_set_mw_bind_wqe
)(__le64
*wqe
,
284 struct irdma_bind_window
*op_info
);
/*
 * uk CQ management (poll, arm, resize) and QP/CQ initialization plus
 * depth/shift sizing helpers.
 * NOTE(review): irdma_uk_calc_shift_wq is truncated -- its continuation
 * line (presumably the rq_shift output parameter) was lost in
 * extraction.
 */
287 int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk
*cq
,
288 struct irdma_cq_poll_info
*info
);
289 void irdma_uk_cq_request_notification(struct irdma_cq_uk
*cq
,
290 enum irdma_cmpl_notify cq_notify
);
291 void irdma_uk_cq_resize(struct irdma_cq_uk
*cq
, void *cq_base
, int size
);
292 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk
*qp
, u16 cnt
);
293 void irdma_uk_cq_init(struct irdma_cq_uk
*cq
,
294 struct irdma_cq_uk_init_info
*info
);
295 int irdma_uk_qp_init(struct irdma_qp_uk
*qp
,
296 struct irdma_qp_uk_init_info
*info
);
/* truncated: trailing parameter list lost */
297 void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info
*ukinfo
, u8
*sq_shift
,
299 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info
*ukinfo
,
300 u32
*sq_depth
, u8
*sq_shift
);
301 int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info
*ukinfo
,
302 u32
*rq_depth
, u8
*rq_shift
);
/*
 * NOTE(review): only the opening of struct irdma_sq_uk_wr_trk_info
 * survives -- all members (per-WR tracking fields) and the closing "};"
 * were lost in extraction.
 */
303 struct irdma_sq_uk_wr_trk_info
{
310 struct irdma_qp_quanta
{
311 __le64 elem
[IRDMA_WQE_SIZE
];
/*
 * NOTE(review): interiors of two structs whose opening declarations
 * were lost in extraction -- the first run of fields belongs to
 * struct irdma_qp_uk (per-QP user-kernel state: ring bases, doorbell,
 * rings, WQE ops, flush/destroy flags), the second (from "struct
 * irdma_cqe *cq_base" on) to struct irdma_cq_uk.  Many members and
 * both closing "};" are missing.
 */
/* --- struct irdma_qp_uk interior (header lost) --- */
315 struct irdma_qp_quanta
*sq_base
;
316 struct irdma_qp_quanta
*rq_base
;
317 struct irdma_uk_attrs
*uk_attrs
;
/* doorbell register used to post work requests */
318 u32 __iomem
*wqe_alloc_db
;
319 struct irdma_sq_uk_wr_trk_info
*sq_wrtrk_array
;
322 struct irdma_ring sq_ring
;
323 struct irdma_ring rq_ring
;
324 struct irdma_ring initial_ring
;
332 struct irdma_wqe_uk_ops wqe_ops
;
336 u8 swqe_polarity_deferred
;
339 u8 rq_wqe_size_multiplier
;
340 bool deferred_flag
:1;
342 bool sq_flush_complete
:1; /* Indicates flush was seen and SQ was empty after the flush */
343 bool rq_flush_complete
:1; /* Indicates flush was seen and RQ was empty after the flush */
344 bool destroy_pending
:1; /* Indicates the QP is being destroyed */
/* --- struct irdma_cq_uk interior (header lost) --- */
352 struct irdma_cqe
*cq_base
;
353 u32 __iomem
*cqe_alloc_db
;
354 u32 __iomem
*cq_ack_db
;
358 struct irdma_ring cq_ring
;
360 bool avoid_mem_cflct
:1;
/*
 * NOTE(review): both init-info structs below are fragments -- extraction
 * dropped many members (queue sizes, IDs, shifts, ...) and both closing
 * "};".  They carry the caller-supplied parameters consumed by
 * irdma_uk_qp_init() / irdma_uk_cq_init().
 */
/* parameters for irdma_uk_qp_init() (fragment) */
363 struct irdma_qp_uk_init_info
{
364 struct irdma_qp_quanta
*sq
;
365 struct irdma_qp_quanta
*rq
;
366 struct irdma_uk_attrs
*uk_attrs
;
367 u32 __iomem
*wqe_alloc_db
;
369 struct irdma_sq_uk_wr_trk_info
*sq_wrtrk_array
;
/* parameters for irdma_uk_cq_init() (fragment) */
388 struct irdma_cq_uk_init_info
{
389 u32 __iomem
*cqe_alloc_db
;
390 u32 __iomem
*cq_ack_db
;
391 struct irdma_cqe
*cq_base
;
395 bool avoid_mem_cflct
;
/*
 * WQE allocation helpers, CQ cleanup, NOP posting, and fragment-count /
 * depth sizing utilities.
 * NOTE(review): irdma_get_sqdepth and irdma_get_rqdepth are truncated --
 * their continuation lines (presumably the "u32 *depth);" output
 * parameter) were lost in extraction.
 */
398 __le64
*irdma_qp_get_next_send_wqe(struct irdma_qp_uk
*qp
, u32
*wqe_idx
,
399 u16 quanta
, u32 total_size
,
400 struct irdma_post_sq_info
*info
);
401 __le64
*irdma_qp_get_next_recv_wqe(struct irdma_qp_uk
*qp
, u32
*wqe_idx
);
402 void irdma_uk_clean_cq(void *q
, struct irdma_cq_uk
*cq
);
403 int irdma_nop(struct irdma_qp_uk
*qp
, u64 wr_id
, bool signaled
, bool post_sq
);
404 int irdma_fragcnt_to_quanta_sq(u32 frag_cnt
, u16
*quanta
);
405 int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt
, u16
*wqe_size
);
406 void irdma_get_wqe_shift(struct irdma_uk_attrs
*uk_attrs
, u32 sge
,
407 u32 inline_data
, u8
*shift
);
/* truncated: trailing parameter list lost */
408 int irdma_get_sqdepth(struct irdma_uk_attrs
*uk_attrs
, u32 sq_size
, u8 shift
,
/* truncated: trailing parameter list lost */
410 int irdma_get_rqdepth(struct irdma_uk_attrs
*uk_attrs
, u32 rq_size
, u8 shift
,
412 void irdma_clr_wqes(struct irdma_qp_uk
*qp
, u32 qp_wqe_idx
);
413 #endif /* IRDMA_USER_H */