inet: frag: enforce memory limits earlier
[linux/fpc-iii.git] / drivers / infiniband / core / cm_msgs.h
blob8b76f0ef965e88d7171e8cebcbaf2a0c9a29f962
/*
 * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #if !defined(CM_MSGS_H)
35 #define CM_MSGS_H
37 #include <rdma/ib_mad.h>
38 #include <rdma/ib_cm.h>
/*
 * Parameters to routines below should be in network-byte order, and values
 * are returned in network-byte order.
 */
45 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
/* Per-message-type ordering spaces used when building CM transactions. */
enum cm_msg_sequence {
	CM_MSG_SEQUENCE_REQ,
	CM_MSG_SEQUENCE_LAP,
	CM_MSG_SEQUENCE_DREQ,
	CM_MSG_SEQUENCE_SIDR
};
54 struct cm_req_msg {
55 struct ib_mad_hdr hdr;
57 __be32 local_comm_id;
58 __be32 rsvd4;
59 __be64 service_id;
60 __be64 local_ca_guid;
61 __be32 rsvd24;
62 __be32 local_qkey;
63 /* local QPN:24, responder resources:8 */
64 __be32 offset32;
65 /* local EECN:24, initiator depth:8 */
66 __be32 offset36;
68 * remote EECN:24, remote CM response timeout:5,
69 * transport service type:2, end-to-end flow control:1
71 __be32 offset40;
72 /* starting PSN:24, local CM response timeout:5, retry count:3 */
73 __be32 offset44;
74 __be16 pkey;
75 /* path MTU:4, RDC exists:1, RNR retry count:3. */
76 u8 offset50;
77 /* max CM Retries:4, SRQ:1, extended transport type:3 */
78 u8 offset51;
80 __be16 primary_local_lid;
81 __be16 primary_remote_lid;
82 union ib_gid primary_local_gid;
83 union ib_gid primary_remote_gid;
84 /* flow label:20, rsvd:6, packet rate:6 */
85 __be32 primary_offset88;
86 u8 primary_traffic_class;
87 u8 primary_hop_limit;
88 /* SL:4, subnet local:1, rsvd:3 */
89 u8 primary_offset94;
90 /* local ACK timeout:5, rsvd:3 */
91 u8 primary_offset95;
93 __be16 alt_local_lid;
94 __be16 alt_remote_lid;
95 union ib_gid alt_local_gid;
96 union ib_gid alt_remote_gid;
97 /* flow label:20, rsvd:6, packet rate:6 */
98 __be32 alt_offset132;
99 u8 alt_traffic_class;
100 u8 alt_hop_limit;
101 /* SL:4, subnet local:1, rsvd:3 */
102 u8 alt_offset138;
103 /* local ACK timeout:5, rsvd:3 */
104 u8 alt_offset139;
106 u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
108 } __attribute__ ((packed));
110 static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
112 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
115 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
117 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
118 (be32_to_cpu(req_msg->offset32) &
119 0x000000FF));
122 static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
124 return (u8) be32_to_cpu(req_msg->offset32);
127 static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
129 req_msg->offset32 = cpu_to_be32(resp_res |
130 (be32_to_cpu(req_msg->offset32) &
131 0xFFFFFF00));
134 static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
136 return (u8) be32_to_cpu(req_msg->offset36);
139 static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
140 u8 init_depth)
142 req_msg->offset36 = cpu_to_be32(init_depth |
143 (be32_to_cpu(req_msg->offset36) &
144 0xFFFFFF00));
147 static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
149 return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
152 static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
153 u8 resp_timeout)
155 req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
156 (be32_to_cpu(req_msg->offset40) &
157 0xFFFFFF07));
160 static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
162 u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
163 switch(transport_type) {
164 case 0: return IB_QPT_RC;
165 case 1: return IB_QPT_UC;
166 case 3:
167 switch (req_msg->offset51 & 0x7) {
168 case 1: return IB_QPT_XRC_TGT;
169 default: return 0;
171 default: return 0;
175 static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
176 enum ib_qp_type qp_type)
178 switch(qp_type) {
179 case IB_QPT_UC:
180 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
181 req_msg->offset40) &
182 0xFFFFFFF9) | 0x2);
183 break;
184 case IB_QPT_XRC_INI:
185 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
186 req_msg->offset40) &
187 0xFFFFFFF9) | 0x6);
188 req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
189 break;
190 default:
191 req_msg->offset40 = cpu_to_be32(be32_to_cpu(
192 req_msg->offset40) &
193 0xFFFFFFF9);
197 static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
199 return be32_to_cpu(req_msg->offset40) & 0x1;
202 static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
203 u8 flow_ctrl)
205 req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
206 (be32_to_cpu(req_msg->offset40) &
207 0xFFFFFFFE));
210 static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
212 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
215 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
216 __be32 starting_psn)
218 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
219 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
222 static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
224 return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
227 static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
228 u8 resp_timeout)
230 req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
231 (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
234 static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
236 return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
239 static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
240 u8 retry_count)
242 req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
243 (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
246 static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
248 return req_msg->offset50 >> 4;
251 static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
253 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
256 static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
258 return req_msg->offset50 & 0x7;
261 static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
262 u8 rnr_retry_count)
264 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
265 (rnr_retry_count & 0x7));
268 static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
270 return req_msg->offset51 >> 4;
273 static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
274 u8 retries)
276 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
279 static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
281 return (req_msg->offset51 & 0x8) >> 3;
284 static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
286 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
287 ((srq & 0x1) << 3));
290 static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
292 return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
295 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
296 __be32 flow_label)
298 req_msg->primary_offset88 = cpu_to_be32(
299 (be32_to_cpu(req_msg->primary_offset88) &
300 0x00000FFF) |
301 (be32_to_cpu(flow_label) << 12));
304 static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
306 return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
309 static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
310 u8 rate)
312 req_msg->primary_offset88 = cpu_to_be32(
313 (be32_to_cpu(req_msg->primary_offset88) &
314 0xFFFFFFC0) | (rate & 0x3F));
317 static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
319 return (u8) (req_msg->primary_offset94 >> 4);
322 static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
324 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
325 (sl << 4));
328 static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
330 return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
333 static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
334 u8 subnet_local)
336 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
337 ((subnet_local & 0x1) << 3));
340 static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
342 return (u8) (req_msg->primary_offset95 >> 3);
345 static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
346 u8 local_ack_timeout)
348 req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
349 (local_ack_timeout << 3));
352 static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
354 return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
357 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
358 __be32 flow_label)
360 req_msg->alt_offset132 = cpu_to_be32(
361 (be32_to_cpu(req_msg->alt_offset132) &
362 0x00000FFF) |
363 (be32_to_cpu(flow_label) << 12));
366 static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
368 return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
371 static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
372 u8 rate)
374 req_msg->alt_offset132 = cpu_to_be32(
375 (be32_to_cpu(req_msg->alt_offset132) &
376 0xFFFFFFC0) | (rate & 0x3F));
379 static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
381 return (u8) (req_msg->alt_offset138 >> 4);
384 static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
386 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
387 (sl << 4));
390 static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
392 return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
395 static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
396 u8 subnet_local)
398 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
399 ((subnet_local & 0x1) << 3));
402 static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
404 return (u8) (req_msg->alt_offset139 >> 3);
407 static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
408 u8 local_ack_timeout)
410 req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
411 (local_ack_timeout << 3));
/* Which message a REJ or MRA refers to (message REJected or MRAed). */
enum cm_msg_response {
	CM_MSG_RESPONSE_REQ = 0x0,
	CM_MSG_RESPONSE_REP = 0x1,
	CM_MSG_RESPONSE_OTHER = 0x2
};
421 struct cm_mra_msg {
422 struct ib_mad_hdr hdr;
424 __be32 local_comm_id;
425 __be32 remote_comm_id;
426 /* message MRAed:2, rsvd:6 */
427 u8 offset8;
428 /* service timeout:5, rsvd:3 */
429 u8 offset9;
431 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
433 } __attribute__ ((packed));
435 static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
437 return (u8) (mra_msg->offset8 >> 6);
440 static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
442 mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
445 static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
447 return (u8) (mra_msg->offset9 >> 3);
450 static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
451 u8 service_timeout)
453 mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
454 (service_timeout << 3));
457 struct cm_rej_msg {
458 struct ib_mad_hdr hdr;
460 __be32 local_comm_id;
461 __be32 remote_comm_id;
462 /* message REJected:2, rsvd:6 */
463 u8 offset8;
464 /* reject info length:7, rsvd:1. */
465 u8 offset9;
466 __be16 reason;
467 u8 ari[IB_CM_REJ_ARI_LENGTH];
469 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
471 } __attribute__ ((packed));
473 static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
475 return (u8) (rej_msg->offset8 >> 6);
478 static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
480 rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
483 static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
485 return (u8) (rej_msg->offset9 >> 1);
488 static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
489 u8 len)
491 rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
494 struct cm_rep_msg {
495 struct ib_mad_hdr hdr;
497 __be32 local_comm_id;
498 __be32 remote_comm_id;
499 __be32 local_qkey;
500 /* local QPN:24, rsvd:8 */
501 __be32 offset12;
502 /* local EECN:24, rsvd:8 */
503 __be32 offset16;
504 /* starting PSN:24 rsvd:8 */
505 __be32 offset20;
506 u8 resp_resources;
507 u8 initiator_depth;
508 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
509 u8 offset26;
510 /* RNR retry count:3, SRQ:1, rsvd:5 */
511 u8 offset27;
512 __be64 local_ca_guid;
514 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
516 } __attribute__ ((packed));
518 static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
520 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
523 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
525 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
526 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
529 static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
531 return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
534 static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
536 rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
537 (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
540 static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
542 return (qp_type == IB_QPT_XRC_INI) ?
543 cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
546 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
548 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
551 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
552 __be32 starting_psn)
554 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
555 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
558 static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
560 return (u8) (rep_msg->offset26 >> 3);
563 static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
564 u8 target_ack_delay)
566 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
567 (target_ack_delay << 3));
570 static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
572 return (u8) ((rep_msg->offset26 & 0x06) >> 1);
575 static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
577 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
578 ((failover & 0x3) << 1));
581 static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
583 return (u8) (rep_msg->offset26 & 0x01);
586 static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
587 u8 flow_ctrl)
589 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
590 (flow_ctrl & 0x1));
593 static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
595 return (u8) (rep_msg->offset27 >> 5);
598 static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
599 u8 rnr_retry_count)
601 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
602 (rnr_retry_count << 5));
605 static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
607 return (u8) ((rep_msg->offset27 >> 4) & 0x1);
610 static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
612 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
613 ((srq & 0x1) << 4));
616 struct cm_rtu_msg {
617 struct ib_mad_hdr hdr;
619 __be32 local_comm_id;
620 __be32 remote_comm_id;
622 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
624 } __attribute__ ((packed));
626 struct cm_dreq_msg {
627 struct ib_mad_hdr hdr;
629 __be32 local_comm_id;
630 __be32 remote_comm_id;
631 /* remote QPN/EECN:24, rsvd:8 */
632 __be32 offset8;
634 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
636 } __attribute__ ((packed));
638 static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
640 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
643 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
645 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
646 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
649 struct cm_drep_msg {
650 struct ib_mad_hdr hdr;
652 __be32 local_comm_id;
653 __be32 remote_comm_id;
655 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
657 } __attribute__ ((packed));
659 struct cm_lap_msg {
660 struct ib_mad_hdr hdr;
662 __be32 local_comm_id;
663 __be32 remote_comm_id;
665 __be32 rsvd8;
666 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
667 __be32 offset12;
668 __be32 rsvd16;
670 __be16 alt_local_lid;
671 __be16 alt_remote_lid;
672 union ib_gid alt_local_gid;
673 union ib_gid alt_remote_gid;
674 /* flow label:20, rsvd:4, traffic class:8 */
675 __be32 offset56;
676 u8 alt_hop_limit;
677 /* rsvd:2, packet rate:6 */
678 u8 offset61;
679 /* SL:4, subnet local:1, rsvd:3 */
680 u8 offset62;
681 /* local ACK timeout:5, rsvd:3 */
682 u8 offset63;
684 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
685 } __attribute__ ((packed));
687 static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
689 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
692 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
694 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
695 (be32_to_cpu(lap_msg->offset12) &
696 0x000000FF));
699 static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
701 return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
704 static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
705 u8 resp_timeout)
707 lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
708 (be32_to_cpu(lap_msg->offset12) &
709 0xFFFFFF07));
712 static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
714 return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
717 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
718 __be32 flow_label)
720 lap_msg->offset56 = cpu_to_be32(
721 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
722 (be32_to_cpu(flow_label) << 12));
725 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
727 return (u8) be32_to_cpu(lap_msg->offset56);
730 static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
731 u8 traffic_class)
733 lap_msg->offset56 = cpu_to_be32(traffic_class |
734 (be32_to_cpu(lap_msg->offset56) &
735 0xFFFFFF00));
738 static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
740 return lap_msg->offset61 & 0x3F;
743 static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
744 u8 packet_rate)
746 lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
749 static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
751 return lap_msg->offset62 >> 4;
754 static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
756 lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
759 static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
761 return (lap_msg->offset62 >> 3) & 0x1;
764 static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
765 u8 subnet_local)
767 lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
768 (lap_msg->offset61 & 0xF7);
770 static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
772 return lap_msg->offset63 >> 3;
775 static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
776 u8 local_ack_timeout)
778 lap_msg->offset63 = (local_ack_timeout << 3) |
779 (lap_msg->offset63 & 0x07);
782 struct cm_apr_msg {
783 struct ib_mad_hdr hdr;
785 __be32 local_comm_id;
786 __be32 remote_comm_id;
788 u8 info_length;
789 u8 ap_status;
790 __be16 rsvd;
791 u8 info[IB_CM_APR_INFO_LENGTH];
793 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
794 } __attribute__ ((packed));
796 struct cm_sidr_req_msg {
797 struct ib_mad_hdr hdr;
799 __be32 request_id;
800 __be16 pkey;
801 __be16 rsvd;
802 __be64 service_id;
804 u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
805 } __attribute__ ((packed));
807 struct cm_sidr_rep_msg {
808 struct ib_mad_hdr hdr;
810 __be32 request_id;
811 u8 status;
812 u8 info_length;
813 __be16 rsvd;
814 /* QPN:24, rsvd:8 */
815 __be32 offset8;
816 __be64 service_id;
817 __be32 qkey;
818 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
820 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
821 } __attribute__ ((packed));
823 static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
825 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
828 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
829 __be32 qpn)
831 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
832 (be32_to_cpu(sidr_rep_msg->offset8) &
833 0x000000FF));
836 #endif /* CM_MSGS_H */