/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H
/* Extracted information about a packet, stored in the sk_buff control
 * block (cb) for received packets. The structure must fit in the
 * 48-byte cb array.
 */
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns packet */
	struct rxe_qp		*qp;		/* qp that owns packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to bth */
	u32			mask;		/* useful info about pkt */
	u32			psn;		/* bth psn of packet */
	u16			pkey_index;	/* partition of pkt */
	u16			paylen;		/* length from bth through icrc */
	u8			port_num;	/* port pkt received on */
	u8			opcode;		/* bth opcode of packet */
	u8			offset;		/* bth offset from pkt->hdr */
};
/* Macros should be used only for received skb */
#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
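/*
 * Illustrative sketch, not part of the original header: the two macros
 * above just reinterpret the skb->cb storage, so the conversion is
 * loss-free in both directions. A compile-time check along these lines
 * (assuming BUILD_BUG_ON is available in this context) is one way to
 * enforce the 48-byte limit noted above; the helper name is hypothetical.
 */
static inline struct rxe_pkt_info *example_pkt_from_skb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));

	return SKB_TO_PKT(skb);
}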
/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only, since
 * rxe does not currently support the RD transport. Most of this could be
 * moved into the IB core; ib_pack.h has part of it but is incomplete.
 *
 * Header-specific routines insert and extract header field values.
 * Routines named __hhh_fff() and __hhh_set_fff() take a pointer to an
 * hhh header and get or set the fff field. Routines named hhh_fff() and
 * hhh_set_fff() take a packet info struct and locate the header and
 * field from the opcode in the packet. Conversion between network and
 * cpu byte order is also done in these routines.
 */
#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};

#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)
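/*
 * Illustrative sketch, not part of the original header: the first group
 * of masks above shares the single BTH flags byte, packing the solicited
 * event bit, migration request bit, pad count and transport version
 * together. A flags byte can be decomposed like this; the helper name is
 * hypothetical.
 */
static inline void example_unpack_bth_flags(u8 flags, u8 *se, u8 *mig,
					    u8 *pad, u8 *tver)
{
	*se   = !!(flags & BTH_SE_MASK);	/* bit 7 */
	*mig  = !!(flags & BTH_MIG_MASK);	/* bit 6 */
	*pad  = (flags & BTH_PAD_MASK) >> 4;	/* bits 5:4 */
	*tver = flags & BTH_TVER_MASK;		/* bits 3:0 */
}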
static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
				(~BTH_PSN_MASK & apsn));
}
static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}
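/*
 * Illustrative sketch, not part of the original header: the two accessor
 * forms described at the top of this file return the same value for the
 * same packet, __bth_psn() from a raw header pointer and bth_psn() from
 * the packet info. The helper name is hypothetical.
 */
static inline u32 example_psn_both_forms(struct rxe_pkt_info *pkt)
{
	u32 via_pkt = bth_psn(pkt);
	u32 via_hdr = __bth_psn(pkt->hdr + pkt->offset);

	return (via_pkt == via_hdr) ? via_pkt : 0;
}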
static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}
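/*
 * Illustrative sketch, not part of the original header: a requester can
 * fill in its BTH with bth_init() roughly like this, here with no
 * solicited event, no migration, no pad, the default pkey and an ack
 * request. The helper name is hypothetical.
 */
static inline void example_build_request_bth(struct rxe_pkt_info *pkt,
					     u32 qpn, u32 psn)
{
	bth_init(pkt, pkt->opcode, 0, 0, 0, BTH_DEF_PKEY, qpn, 1, psn);
}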
/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/

#define RDETH_EEN_MASK		(0x00ffffff)

static inline u8 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __attribute__((__packed__));

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
				 (~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
				 (~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
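/*
 * Illustrative sketch, not part of the original header: the AETH
 * syndrome byte carries the ack/nak class in its top bits
 * (AETH_TYPE_MASK) and a class-specific code in the remaining bits, so a
 * responder NAK for a PSN sequence error can be recognized like this.
 * The helper name is hypothetical.
 */
static inline int example_is_psn_seq_nak(struct rxe_pkt_info *pkt)
{
	return aeth_syn(pkt) == AETH_NAK_PSN_SEQ_ERROR;
}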
/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}
enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
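/*
 * Illustrative sketch, not part of the original header: payload_size()
 * simply inverts the paylen accounting, so the header bytes from the BTH
 * onward, the payload, the pad and the ICRC always add back up to
 * pkt->paylen. The helper name is hypothetical.
 */
static inline int example_paylen_adds_up(struct rxe_pkt_info *pkt)
{
	return pkt->paylen == rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD] +
		payload_size(pkt) + bth_pad(pkt) + RXE_ICRC_SIZE;
}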
#endif /* RXE_HDR_H */