1 /* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
3 /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
4 /* Copyright (c) 2008-2019, IBM Corporation */
9 #include <rdma/rdma_user_cm.h> /* RDMA_MAX_PRIVATE_DATA */
10 #include <linux/types.h>
11 #include <asm/byteorder.h>
13 #define RDMAP_VERSION 1
15 #define MPA_REVISION_1 1
16 #define MPA_REVISION_2 2
17 #define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
18 #define MPA_KEY_REQ "MPA ID Req Frame"
19 #define MPA_KEY_REP "MPA ID Rep Frame"
20 #define MPA_IRD_ORD_MASK 0x3fff
22 struct mpa_rr_params
{
28 * MPA request/response header bits & fields
31 MPA_RR_FLAG_MARKERS
= cpu_to_be16(0x8000),
32 MPA_RR_FLAG_CRC
= cpu_to_be16(0x4000),
33 MPA_RR_FLAG_REJECT
= cpu_to_be16(0x2000),
34 MPA_RR_FLAG_ENHANCED
= cpu_to_be16(0x1000),
35 MPA_RR_FLAG_GSO_EXP
= cpu_to_be16(0x0800),
36 MPA_RR_MASK_REVISION
= cpu_to_be16(0x00ff)
40 * MPA request/reply header
44 struct mpa_rr_params params
;
47 static inline void __mpa_rr_set_revision(__be16
*bits
, u8 rev
)
49 *bits
= (*bits
& ~MPA_RR_MASK_REVISION
) |
50 (cpu_to_be16(rev
) & MPA_RR_MASK_REVISION
);
53 static inline u8
__mpa_rr_revision(__be16 mpa_rr_bits
)
55 __be16 rev
= mpa_rr_bits
& MPA_RR_MASK_REVISION
;
57 return be16_to_cpu(rev
);
61 MPA_V2_PEER_TO_PEER
= cpu_to_be16(0x8000),
62 MPA_V2_ZERO_LENGTH_RTR
= cpu_to_be16(0x4000),
63 MPA_V2_RDMA_WRITE_RTR
= cpu_to_be16(0x8000),
64 MPA_V2_RDMA_READ_RTR
= cpu_to_be16(0x4000),
65 MPA_V2_RDMA_NO_RTR
= cpu_to_be16(0x0000),
66 MPA_V2_MASK_IRD_ORD
= cpu_to_be16(0x3fff)
76 __be16 fpdu_hmd
; /* FPDU header-marker distance (= MPA's FPDUPTR) */
87 #define MPA_HDR_SIZE 2
88 #define MPA_CRC_SIZE 4
91 * Common portion of iWARP headers (MPA, DDP, RDMAP)
96 __be16 ddp_rdmap_ctrl
;
100 * DDP/RDMAP Hdr bits & fields
103 DDP_FLAG_TAGGED
= cpu_to_be16(0x8000),
104 DDP_FLAG_LAST
= cpu_to_be16(0x4000),
105 DDP_MASK_RESERVED
= cpu_to_be16(0x3C00),
106 DDP_MASK_VERSION
= cpu_to_be16(0x0300),
107 RDMAP_MASK_VERSION
= cpu_to_be16(0x00C0),
108 RDMAP_MASK_RESERVED
= cpu_to_be16(0x0030),
109 RDMAP_MASK_OPCODE
= cpu_to_be16(0x000f)
112 static inline u8
__ddp_get_version(struct iwarp_ctrl
*ctrl
)
114 return be16_to_cpu(ctrl
->ddp_rdmap_ctrl
& DDP_MASK_VERSION
) >> 8;
117 static inline void __ddp_set_version(struct iwarp_ctrl
*ctrl
, u8 version
)
119 ctrl
->ddp_rdmap_ctrl
=
120 (ctrl
->ddp_rdmap_ctrl
& ~DDP_MASK_VERSION
) |
121 (cpu_to_be16((u16
)version
<< 8) & DDP_MASK_VERSION
);
124 static inline u8
__rdmap_get_version(struct iwarp_ctrl
*ctrl
)
126 __be16 ver
= ctrl
->ddp_rdmap_ctrl
& RDMAP_MASK_VERSION
;
128 return be16_to_cpu(ver
) >> 6;
131 static inline void __rdmap_set_version(struct iwarp_ctrl
*ctrl
, u8 version
)
133 ctrl
->ddp_rdmap_ctrl
= (ctrl
->ddp_rdmap_ctrl
& ~RDMAP_MASK_VERSION
) |
134 (cpu_to_be16(version
<< 6) & RDMAP_MASK_VERSION
);
137 static inline u8
__rdmap_get_opcode(struct iwarp_ctrl
*ctrl
)
139 return be16_to_cpu(ctrl
->ddp_rdmap_ctrl
& RDMAP_MASK_OPCODE
);
142 static inline void __rdmap_set_opcode(struct iwarp_ctrl
*ctrl
, u8 opcode
)
144 ctrl
->ddp_rdmap_ctrl
= (ctrl
->ddp_rdmap_ctrl
& ~RDMAP_MASK_OPCODE
) |
145 (cpu_to_be16(opcode
) & RDMAP_MASK_OPCODE
);
148 struct iwarp_rdma_write
{
149 struct iwarp_ctrl ctrl
;
154 struct iwarp_rdma_rreq
{
155 struct iwarp_ctrl ctrl
;
167 struct iwarp_rdma_rresp
{
168 struct iwarp_ctrl ctrl
;
174 struct iwarp_ctrl ctrl
;
181 struct iwarp_send_inv
{
182 struct iwarp_ctrl ctrl
;
189 struct iwarp_terminate
{
190 struct iwarp_ctrl ctrl
;
195 #if defined(__LITTLE_ENDIAN_BITFIELD)
202 __be32 reserved
: 13;
203 #elif defined(__BIG_ENDIAN_BITFIELD)
204 __be32 reserved
: 13;
212 #error "undefined byte order"
217 * Terminate Hdr bits & fields
220 TERM_MASK_LAYER
= cpu_to_be32(0xf0000000),
221 TERM_MASK_ETYPE
= cpu_to_be32(0x0f000000),
222 TERM_MASK_ECODE
= cpu_to_be32(0x00ff0000),
223 TERM_FLAG_M
= cpu_to_be32(0x00008000),
224 TERM_FLAG_D
= cpu_to_be32(0x00004000),
225 TERM_FLAG_R
= cpu_to_be32(0x00002000),
226 TERM_MASK_RESVD
= cpu_to_be32(0x00001fff)
229 static inline u8
__rdmap_term_layer(struct iwarp_terminate
*term
)
234 static inline void __rdmap_term_set_layer(struct iwarp_terminate
*term
,
237 term
->layer
= layer
& 0xf;
240 static inline u8
__rdmap_term_etype(struct iwarp_terminate
*term
)
245 static inline void __rdmap_term_set_etype(struct iwarp_terminate
*term
,
248 term
->etype
= etype
& 0xf;
251 static inline u8
__rdmap_term_ecode(struct iwarp_terminate
*term
)
256 static inline void __rdmap_term_set_ecode(struct iwarp_terminate
*term
,
263 * Common portion of iWARP headers (MPA, DDP, RDMAP)
264 * for an FPDU carrying an untagged DDP segment
266 struct iwarp_ctrl_untagged
{
267 struct iwarp_ctrl ctrl
;
275 * Common portion of iWARP headers (MPA, DDP, RDMAP)
276 * for an FPDU carrying a tagged DDP segment
278 struct iwarp_ctrl_tagged
{
279 struct iwarp_ctrl ctrl
;
285 struct iwarp_ctrl ctrl
;
286 struct iwarp_ctrl_untagged c_untagged
;
287 struct iwarp_ctrl_tagged c_tagged
;
288 struct iwarp_rdma_write rwrite
;
289 struct iwarp_rdma_rreq rreq
;
290 struct iwarp_rdma_rresp rresp
;
291 struct iwarp_terminate terminate
;
292 struct iwarp_send send
;
293 struct iwarp_send_inv send_inv
;
/* Terminate message: layer that detected the error (RFC 5040). */
enum term_elayer {
	TERM_ERROR_LAYER_RDMAP = 0x00,
	TERM_ERROR_LAYER_DDP = 0x01,
	TERM_ERROR_LAYER_LLP = 0x02 /* eg., MPA */
};
/*
 * DDP layer error types (RFC 5041).
 * NOTE(review): the RSVD value is not visible in this extraction and was
 * restored from the DDP spec — confirm upstream.
 */
enum ddp_etype {
	DDP_ETYPE_CATASTROPHIC = 0x0,
	DDP_ETYPE_TAGGED_BUF = 0x1,
	DDP_ETYPE_UNTAGGED_BUF = 0x2,
	DDP_ETYPE_RSVD = 0x3
};
/* DDP layer error codes, grouped by error type (RFC 5041). */
enum ddp_ecode {
	/* unspecified, set to zero */
	DDP_ECODE_CATASTROPHIC = 0x00,
	/* Tagged Buffer Errors */
	DDP_ECODE_T_INVALID_STAG = 0x00,
	DDP_ECODE_T_BASE_BOUNDS = 0x01,
	DDP_ECODE_T_STAG_NOT_ASSOC = 0x02,
	DDP_ECODE_T_TO_WRAP = 0x03,
	DDP_ECODE_T_VERSION = 0x04,
	/* Untagged Buffer Errors */
	DDP_ECODE_UT_INVALID_QN = 0x01,
	DDP_ECODE_UT_INVALID_MSN_NOBUF = 0x02,
	DDP_ECODE_UT_INVALID_MSN_RANGE = 0x03,
	DDP_ECODE_UT_INVALID_MO = 0x04,
	DDP_ECODE_UT_MSG_TOOLONG = 0x05,
	DDP_ECODE_UT_VERSION = 0x06
};
/* Fixed untagged DDP queue numbers used by RDMAP (RFC 5040). */
enum rdmap_untagged_qn {
	RDMAP_UNTAGGED_QN_SEND = 0,
	RDMAP_UNTAGGED_QN_RDMA_READ = 1,
	RDMAP_UNTAGGED_QN_TERMINATE = 2,
	RDMAP_UNTAGGED_QN_COUNT = 3
};
/* RDMAP layer error types (RFC 5040). */
enum rdmap_etype {
	RDMAP_ETYPE_CATASTROPHIC = 0x0,
	RDMAP_ETYPE_REMOTE_PROTECTION = 0x1,
	RDMAP_ETYPE_REMOTE_OPERATION = 0x2
};
/* RDMAP layer error codes (RFC 5040). */
enum rdmap_ecode {
	RDMAP_ECODE_INVALID_STAG = 0x00,
	RDMAP_ECODE_BASE_BOUNDS = 0x01,
	RDMAP_ECODE_ACCESS_RIGHTS = 0x02,
	RDMAP_ECODE_STAG_NOT_ASSOC = 0x03,
	RDMAP_ECODE_TO_WRAP = 0x04,
	RDMAP_ECODE_VERSION = 0x05,
	RDMAP_ECODE_OPCODE = 0x06,
	RDMAP_ECODE_CATASTROPHIC_STREAM = 0x07,
	RDMAP_ECODE_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_ECODE_CANNOT_INVALIDATE = 0x09,
	RDMAP_ECODE_UNSPECIFIED = 0xff
};
/* LLP (MPA) layer error codes. */
enum llp_ecode {
	LLP_ECODE_TCP_STREAM_LOST = 0x01, /* How to transfer this ?? */
	LLP_ECODE_RECEIVED_CRC = 0x02,
	LLP_ECODE_FPDU_START = 0x03,
	LLP_ECODE_INVALID_REQ_RESP = 0x04,

	/* Errors for Enhanced Connection Establishment only */
	LLP_ECODE_LOCAL_CATASTROPHIC = 0x05,
	LLP_ECODE_INSUFFICIENT_IRD = 0x06,
	LLP_ECODE_NO_MATCHING_RTR = 0x07
};
/* LLP error type: MPA is the only lower-layer protocol used here. */
enum llp_etype { LLP_ETYPE_MPA = 0x00 };
/*
 * RDMAP operation codes, low 4 bits of ddp_rdmap_ctrl (RFC 5040).
 * NOTE(review): RDMAP_SEND (0x3) and RDMAP_SEND_SE (0x5) are missing
 * from this extraction and were restored per the RDMAP opcode table,
 * matching the gaps in the visible numbering — confirm upstream.
 */
enum rdma_opcode {
	RDMAP_RDMA_WRITE = 0x0,
	RDMAP_RDMA_READ_REQ = 0x1,
	RDMAP_RDMA_READ_RESP = 0x2,
	RDMAP_SEND = 0x3,
	RDMAP_SEND_INVAL = 0x4,
	RDMAP_SEND_SE = 0x5,
	RDMAP_SEND_SE_INVAL = 0x6,
	RDMAP_TERMINATE = 0x7,
	/* first opcode value beyond the defined set */
	RDMAP_NOT_SUPPORTED = RDMAP_TERMINATE + 1
};