/*
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>

/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */

#define CM_REQ_ATTR_ID		__constant_htons(0x0010)
#define CM_MRA_ATTR_ID		__constant_htons(0x0011)
#define CM_REJ_ATTR_ID		__constant_htons(0x0012)
#define CM_REP_ATTR_ID		__constant_htons(0x0013)
#define CM_RTU_ATTR_ID		__constant_htons(0x0014)
#define CM_DREQ_ATTR_ID		__constant_htons(0x0015)
#define CM_DREP_ATTR_ID		__constant_htons(0x0016)
#define CM_SIDR_REQ_ATTR_ID	__constant_htons(0x0017)
#define CM_SIDR_REP_ATTR_ID	__constant_htons(0x0018)
#define CM_LAP_ATTR_ID		__constant_htons(0x0019)
#define CM_APR_ATTR_ID		__constant_htons(0x001A)

enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};

struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, rsvd:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}

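/*
 * Illustrative sketch only, not part of the CM wire format: the accessors
 * above take and return values in network byte order, per the note at the
 * top of this file, so callers convert host-order values first.  The helper
 * name cm_req_example_set_qpn and its qpn parameter are hypothetical.
 */
static inline void cm_req_example_set_qpn(struct cm_req_msg *req_msg, u32 qpn)
{
	/* Convert the host-order QPN before packing it into offset32. */
	cm_req_set_local_qpn(req_msg, cpu_to_be32(qpn));
	/*
	 * cm_req_get_local_qpn(req_msg) now returns cpu_to_be32(qpn); the
	 * low byte of offset32 (responder resources) is left untouched.
	 */
}
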
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						 0xFFFFFFF9) | 0x2);
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9);
	}
}

static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}

static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}

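/*
 * Illustrative sketch only: offset50 packs the path MTU in its high nibble
 * and the RNR retry count in its low three bits, so the two setters above
 * may be called in either order without clobbering each other.  The helper
 * name cm_req_example_pack_offset50 and its parameters are hypothetical.
 */
static inline void cm_req_example_pack_offset50(struct cm_req_msg *req_msg,
						u8 path_mtu, u8 rnr_retry)
{
	cm_req_set_path_mtu(req_msg, path_mtu);		/* bits 7:4 */
	cm_req_set_rnr_retry_count(req_msg, rnr_retry);	/* bits 2:0 */
	/* cm_req_get_path_mtu() still reports path_mtu after both calls. */
}
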
/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}

struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}

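/*
 * Illustrative sketch only: the additional reject information (ARI) lives
 * in rej_msg->ari, and cm_rej_get_reject_info_len() reports how many of
 * those bytes are valid.  The helper name cm_rej_example_copy_ari and its
 * buf/len parameters are hypothetical.
 */
static inline void cm_rej_example_copy_ari(struct cm_rej_msg *rej_msg,
					   u8 *buf, u8 *len)
{
	u8 i;

	*len = cm_rej_get_reject_info_len(rej_msg);
	if (*len > IB_CM_REJ_ARI_LENGTH)	/* defensive clamp */
		*len = IB_CM_REJ_ARI_LENGTH;
	for (i = 0; i < *len; i++)
		buf[i] = rej_msg->ari[i];
}
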
struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}

struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}

struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(lap_msg->offset12) &
					 0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(lap_msg->offset12) &
					 0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
			    (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
			    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					(be32_to_cpu(lap_msg->offset56) &
					 0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}

static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}

struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					    (be32_to_cpu(sidr_rep_msg->offset8) &
					     0x000000FF));
}

#endif /* CM_MSGS_H */