/* Linux 4.19.133: drivers/infiniband/core/cm_msgs.h */
/*
 * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(CM_MSGS_H)
#define CM_MSGS_H

#include <rdma/ib_mad.h>
#include <rdma/ib_cm.h>
/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */

#define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
struct cm_req_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 rsvd4;
	__be64 service_id;
	__be64 local_ca_guid;
	__be32 rsvd24;
	__be32 local_qkey;
	/* local QPN:24, responder resources:8 */
	__be32 offset32;
	/* local EECN:24, initiator depth:8 */
	__be32 offset36;
	/*
	 * remote EECN:24, remote CM response timeout:5,
	 * transport service type:2, end-to-end flow control:1
	 */
	__be32 offset40;
	/* starting PSN:24, local CM response timeout:5, retry count:3 */
	__be32 offset44;
	__be16 pkey;
	/* path MTU:4, RDC exists:1, RNR retry count:3. */
	u8 offset50;
	/* max CM Retries:4, SRQ:1, extended transport type:3 */
	u8 offset51;

	__be16 primary_local_lid;
	__be16 primary_remote_lid;
	union ib_gid primary_local_gid;
	union ib_gid primary_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 primary_offset88;
	u8 primary_traffic_class;
	u8 primary_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 primary_offset94;
	/* local ACK timeout:5, rsvd:3 */
	u8 primary_offset95;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:6, packet rate:6 */
	__be32 alt_offset132;
	u8 alt_traffic_class;
	u8 alt_hop_limit;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 alt_offset138;
	/* local ACK timeout:5, rsvd:3 */
	u8 alt_offset139;

	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];

} __attribute__ ((packed));
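/*
 * Editorial note (an inference, not from the original source): the offsetNN
 * names appear to encode the byte offset of each word within the REQ payload
 * that follows the MAD header; e.g. local_comm_id(4) + rsvd4(4) +
 * service_id(8) + local_ca_guid(8) + rsvd24(4) + local_qkey(4) = 32 bytes,
 * so offset32 packs:
 *
 *	bits 31..8	local QPN (24 bits)
 *	bits  7..0	responder resources (8 bits)
 */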
static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
}

static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
{
	req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(req_msg->offset32) &
					 0x000000FF));
}
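/*
 * Illustrative caller-side sketch (hypothetical code, not part of this
 * header): per the note above, the accessors take and return values in
 * network byte order, so a host-order QPN is converted at the boundary.
 *
 *	u32 qpn = 0x12345;                                 // host order
 *	cm_req_set_local_qpn(req_msg, cpu_to_be32(qpn));
 *	qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));  // back to host order
 */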
static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset32);
}

static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
{
	req_msg->offset32 = cpu_to_be32(resp_res |
					(be32_to_cpu(req_msg->offset32) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
{
	return (u8) be32_to_cpu(req_msg->offset36);
}

static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
					 u8 init_depth)
{
	req_msg->offset36 = cpu_to_be32(init_depth |
					(be32_to_cpu(req_msg->offset36) &
					 0xFFFFFF00));
}

static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
}

static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
						  u8 resp_timeout)
{
	req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFF07));
}

static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
{
	u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
	switch(transport_type) {
	case 0: return IB_QPT_RC;
	case 1: return IB_QPT_UC;
	case 3:
		switch (req_msg->offset51 & 0x7) {
		case 1: return IB_QPT_XRC_TGT;
		default: return 0;
		}
	default: return 0;
	}
}

static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
				      enum ib_qp_type qp_type)
{
	switch(qp_type) {
	case IB_QPT_UC:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						 0xFFFFFFF9) | 0x2);
		break;
	case IB_QPT_XRC_INI:
		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
						 req_msg->offset40) &
						 0xFFFFFFF9) | 0x6);
		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
		break;
	default:
		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
						req_msg->offset40) &
						0xFFFFFFF9);
	}
}
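/*
 * Illustrative sketch (hypothetical, not part of this header): the encoding
 * is asymmetric for XRC. A REQ built with IB_QPT_XRC_INI stores extended
 * transport type 1 in offset51, which the reader decodes as the target side:
 *
 *	cm_req_set_qp_type(req_msg, IB_QPT_XRC_INI);
 *	cm_req_get_qp_type(req_msg);	// returns IB_QPT_XRC_TGT
 */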
static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
{
	return be32_to_cpu(req_msg->offset40) & 0x1;
}

static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
					u8 flow_ctrl)
{
	req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
					(be32_to_cpu(req_msg->offset40) &
					 0xFFFFFFFE));
}

static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
}

static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
					   __be32 starting_psn)
{
	req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(req_msg->offset44) & 0x000000FF));
}

static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
{
	return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
}

static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
						 u8 resp_timeout)
{
	req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
}

static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
}

static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
					  u8 retry_count)
{
	req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
			    (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
}

static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 >> 4;
}

static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
}

static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
{
	return req_msg->offset50 & 0x7;
}

static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
					      u8 rnr_retry_count)
{
	req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
				  (rnr_retry_count & 0x7));
}

static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
{
	return req_msg->offset51 >> 4;
}

static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
					     u8 retries)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
}

static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
{
	return (req_msg->offset51 & 0x8) >> 3;
}

static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
{
	req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
				  ((srq & 0x1) << 3));
}

static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
}

static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
						 __be32 flow_label)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0x00000FFF) |
				    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
}

static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
						  u8 rate)
{
	req_msg->primary_offset88 = cpu_to_be32(
				    (be32_to_cpu(req_msg->primary_offset88) &
				     0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset94 >> 4);
}

static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
					  (sl << 4));
}

static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
}

static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
						   u8 subnet_local)
{
	req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
					  ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->primary_offset95 >> 3);
}

static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
							 u8 local_ack_timeout)
{
	req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
					  (local_ack_timeout << 3));
}
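/*
 * Illustrative sketch (hypothetical, not part of this header): primary path
 * attributes are bit-packed; e.g. primary_offset88 keeps the 20-bit flow
 * label in bits 31..12 and the 6-bit packet rate in bits 5..0, so the two
 * setters above do not disturb each other:
 *
 *	cm_req_set_primary_flow_label(req_msg, cpu_to_be32(0xABCDE));
 *	cm_req_set_primary_packet_rate(req_msg, 3);
 *	// be32_to_cpu(cm_req_get_primary_flow_label(req_msg)) == 0xABCDE
 *	// cm_req_get_primary_packet_rate(req_msg) == 3
 */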
static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
{
	return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
}

static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
					     __be32 flow_label)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0x00000FFF) |
				 (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
{
	return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
}

static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
					      u8 rate)
{
	req_msg->alt_offset132 = cpu_to_be32(
				 (be32_to_cpu(req_msg->alt_offset132) &
				  0xFFFFFFC0) | (rate & 0x3F));
}

static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset138 >> 4);
}

static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
				       (sl << 4));
}

static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
{
	return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
}

static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
					       u8 subnet_local)
{
	req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
				       ((subnet_local & 0x1) << 3));
}

static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
{
	return (u8) (req_msg->alt_offset139 >> 3);
}

static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
						    u8 local_ack_timeout)
{
	req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
				       (local_ack_timeout << 3));
}
/* Message REJected or MRAed */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};

struct cm_mra_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message MRAed:2, rsvd:6 */
	u8 offset8;
	/* service timeout:5, rsvd:3 */
	u8 offset9;

	u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset8 >> 6);
}

static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
{
	mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
{
	return (u8) (mra_msg->offset9 >> 3);
}

static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
					      u8 service_timeout)
{
	mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
				 (service_timeout << 3));
}
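/*
 * Illustrative sketch (hypothetical, not part of this header): the service
 * timeout is a 5-bit field stored in the upper bits of offset9, so the u8
 * store silently drops anything above 31:
 *
 *	cm_mra_set_service_timeout(mra_msg, 24);
 *	cm_mra_get_service_timeout(mra_msg);	// == 24
 *	cm_mra_set_service_timeout(mra_msg, 33);
 *	cm_mra_get_service_timeout(mra_msg);	// == 1, upper bits are lost
 */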
struct cm_rej_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* message REJected:2, rsvd:6 */
	u8 offset8;
	/* reject info length:7, rsvd:1. */
	u8 offset9;
	__be16 reason;
	u8 ari[IB_CM_REJ_ARI_LENGTH];

	u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset8 >> 6);
}

static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
{
	rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
}

static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
{
	return (u8) (rej_msg->offset9 >> 1);
}

static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
					      u8 len)
{
	rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
}
struct cm_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	__be32 local_qkey;
	/* local QPN:24, rsvd:8 */
	__be32 offset12;
	/* local EECN:24, rsvd:8 */
	__be32 offset16;
	/* starting PSN:24 rsvd:8 */
	__be32 offset20;
	u8 resp_resources;
	u8 initiator_depth;
	/* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
	u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:5 */
	u8 offset27;
	__be64 local_ca_guid;

	u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
}

static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
{
	rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
}

static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
}

static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
{
	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
}

static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
{
	return (qp_type == IB_QPT_XRC_INI) ?
		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
}
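/*
 * Illustrative sketch (hypothetical, not part of this header): for XRC the
 * responder's number lives in the local EECN word rather than the QPN word,
 * so callers select the field by QP type:
 *
 *	__be32 xrc_num = cm_rep_get_qpn(rep_msg, IB_QPT_XRC_INI);  // EECN field
 *	__be32 rc_qpn  = cm_rep_get_qpn(rep_msg, IB_QPT_RC);       // QPN field
 */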
static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
{
	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
}

static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
					   __be32 starting_psn)
{
	rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
			    (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
}

static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 >> 3);
}

static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
					       u8 target_ack_delay)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
				  (target_ack_delay << 3));
}

static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset26 & 0x06) >> 1);
}

static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
				  ((failover & 0x3) << 1));
}

static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset26 & 0x01);
}

static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
					u8 flow_ctrl)
{
	rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
				  (flow_ctrl & 0x1));
}

static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
{
	return (u8) (rep_msg->offset27 >> 5);
}

static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
					      u8 rnr_retry_count)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
				  (rnr_retry_count << 5));
}

static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
{
	return (u8) ((rep_msg->offset27 >> 4) & 0x1);
}

static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
{
	rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
				  ((srq & 0x1) << 4));
}
struct cm_rtu_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_dreq_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	/* remote QPN/EECN:24, rsvd:8 */
	__be32 offset8;

	u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
{
	return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
}

static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
{
	dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
			    (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
}
struct cm_drep_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];

} __attribute__ ((packed));

struct cm_lap_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	__be32 rsvd8;
	/* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
	__be32 offset12;
	__be32 rsvd16;

	__be16 alt_local_lid;
	__be16 alt_remote_lid;
	union ib_gid alt_local_gid;
	union ib_gid alt_remote_gid;
	/* flow label:20, rsvd:4, traffic class:8 */
	__be32 offset56;
	u8 alt_hop_limit;
	/* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;

	u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));
static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
}

static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
{
	lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
					(be32_to_cpu(lap_msg->offset12) &
					 0x000000FF));
}

static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
{
	return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
}

static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
						  u8 resp_timeout)
{
	lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
					(be32_to_cpu(lap_msg->offset12) &
					 0xFFFFFF07));
}

static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
{
	return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
}

static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
					 __be32 flow_label)
{
	lap_msg->offset56 = cpu_to_be32(
			    (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
			    (be32_to_cpu(flow_label) << 12));
}

static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
{
	return (u8) be32_to_cpu(lap_msg->offset56);
}

static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
					    u8 traffic_class)
{
	lap_msg->offset56 = cpu_to_be32(traffic_class |
					(be32_to_cpu(lap_msg->offset56) &
					 0xFFFFFF00));
}

static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset61 & 0x3F;
}

static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
					  u8 packet_rate)
{
	lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
}

static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset62 >> 4;
}

static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
{
	lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
}

static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
{
	return (lap_msg->offset62 >> 3) & 0x1;
}
static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	/* Mask offset62 (not offset61) so the SL bits are preserved. */
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}
static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
{
	return lap_msg->offset63 >> 3;
}

static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
						u8 local_ack_timeout)
{
	lap_msg->offset63 = (local_ack_timeout << 3) |
			    (lap_msg->offset63 & 0x07);
}
struct cm_apr_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;

	u8 info_length;
	u8 ap_status;
	__be16 rsvd;
	u8 info[IB_CM_APR_INFO_LENGTH];

	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

struct cm_sidr_req_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	__be16 pkey;
	__be16 rsvd;
	__be64 service_id;

	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));

struct cm_sidr_rep_msg {
	struct ib_mad_hdr hdr;

	__be32 request_id;
	u8 status;
	u8 info_length;
	__be16 rsvd;
	/* QPN:24, rsvd:8 */
	__be32 offset8;
	__be64 service_id;
	__be32 qkey;
	u8 info[IB_CM_SIDR_REP_INFO_LENGTH];

	u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
} __attribute__ ((packed));

static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
{
	return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
}

static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
				       __be32 qpn)
{
	sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
				(be32_to_cpu(sidr_rep_msg->offset8) &
				 0x000000FF));
}
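/*
 * Illustrative sketch (hypothetical responder code, not part of this header):
 * a SIDR REP advertises the service's QPN and Q_Key to the requester, with
 * the QPN packed into the upper 24 bits of offset8.
 *
 *	cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(my_qpn));  // my_qpn: host order
 *	sidr_rep_msg->qkey = cpu_to_be32(my_qkey);
 *	sidr_rep_msg->status = 0;	// 0 indicates success
 */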
#endif /* CM_MSGS_H */